patch as-lock-macro-simplification
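The change throughout this file is mechanical: every use of the address-space lock macros drops the explicit &as->a_lock argument, since the simplified macros can locate the rwlock from the as pointer themselves. As a rough sketch (an assumption for illustration; the real one-argument definitions live in vm/as.h and are not part of this webrev), the simplified forms would look roughly like:

	/* Sketch of the simplified as-lock macros (assumed, not taken from this patch). */
	#define	AS_LOCK_ENTER(as, type)	rw_enter(&(as)->a_lock, (type))
	#define	AS_LOCK_EXIT(as)	rw_exit(&(as)->a_lock)
	#define	AS_LOCK_HELD(as)	RW_LOCK_HELD(&(as)->a_lock)
	#define	AS_READ_HELD(as)	RW_READ_HELD(&(as)->a_lock)
	#define	AS_WRITE_HELD(as)	RW_WRITE_HELD(&(as)->a_lock)

With that shape, callers below such as hat_alloc() and hat_free_start() assert ownership with ASSERT(AS_WRITE_HELD(as)) instead of naming the lock member at every call site.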
--- old/usr/src/uts/i86pc/vm/hat_i86.c
+++ new/usr/src/uts/i86pc/vm/hat_i86.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright (c) 2010, Intel Corporation.
26 26 * All rights reserved.
27 27 */
28 28 /*
29 29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
30 30 * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
31 31 */
32 32
33 33 /*
34 34 * VM - Hardware Address Translation management for i386 and amd64
35 35 *
36 36 * Implementation of the interfaces described in <common/vm/hat.h>
37 37 *
38 38 * Nearly all the details of how the hardware is managed should not be
39 39 * visible outside this layer except for misc. machine specific functions
40 40 * that work in conjunction with this code.
41 41 *
42 42 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
43 43 */
44 44
45 45 #include <sys/machparam.h>
46 46 #include <sys/machsystm.h>
47 47 #include <sys/mman.h>
48 48 #include <sys/types.h>
49 49 #include <sys/systm.h>
50 50 #include <sys/cpuvar.h>
51 51 #include <sys/thread.h>
52 52 #include <sys/proc.h>
53 53 #include <sys/cpu.h>
54 54 #include <sys/kmem.h>
55 55 #include <sys/disp.h>
56 56 #include <sys/shm.h>
57 57 #include <sys/sysmacros.h>
58 58 #include <sys/machparam.h>
59 59 #include <sys/vmem.h>
60 60 #include <sys/vmsystm.h>
61 61 #include <sys/promif.h>
62 62 #include <sys/var.h>
63 63 #include <sys/x86_archext.h>
64 64 #include <sys/atomic.h>
65 65 #include <sys/bitmap.h>
66 66 #include <sys/controlregs.h>
67 67 #include <sys/bootconf.h>
68 68 #include <sys/bootsvcs.h>
69 69 #include <sys/bootinfo.h>
70 70 #include <sys/archsystm.h>
71 71
72 72 #include <vm/seg_kmem.h>
73 73 #include <vm/hat_i86.h>
74 74 #include <vm/as.h>
75 75 #include <vm/seg.h>
76 76 #include <vm/page.h>
77 77 #include <vm/seg_kp.h>
78 78 #include <vm/seg_kpm.h>
79 79 #include <vm/vm_dep.h>
80 80 #ifdef __xpv
81 81 #include <sys/hypervisor.h>
82 82 #endif
83 83 #include <vm/kboot_mmu.h>
84 84 #include <vm/seg_spt.h>
85 85
86 86 #include <sys/cmn_err.h>
87 87
88 88 /*
89 89 * Basic parameters for hat operation.
90 90 */
91 91 struct hat_mmu_info mmu;
92 92
93 93 /*
94 94 * The page that is the kernel's top level pagetable.
95 95 *
96 96 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
97 97 * on this 4K page for its top level page table. The remaining groups of
98 98 * 4 entries are used for per processor copies of user VLP pagetables for
99 99 * running threads. See hat_switch() and reload_pae32() for details.
100 100 *
101 101 * vlp_page[0..3] - level==2 PTEs for kernel HAT
102 102 * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
103 103 * vlp_page[8..11] - level==2 PTE for user thread on cpu 1
104 104 * etc...
105 105 */
106 106 static x86pte_t *vlp_page;
107 107
108 108 /*
109 109 * forward declaration of internal utility routines
110 110 */
111 111 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
112 112 x86pte_t new);
113 113
114 114 /*
115 115 * The kernel address space exists in all HATs. To implement this the
116 116 * kernel reserves a fixed number of entries in the topmost level(s) of page
117 117 * tables. The values are setup during startup and then copied to every user
118 118 * hat created by hat_alloc(). This means that kernelbase must be:
119 119 *
120 120 * 4Meg aligned for 32 bit kernels
121 121 * 512Gig aligned for x86_64 64 bit kernel
122 122 *
123 123 * The hat_kernel_range_ts describe what needs to be copied from kernel hat
124 124 * to each user hat.
125 125 */
126 126 typedef struct hat_kernel_range {
127 127 level_t hkr_level;
128 128 uintptr_t hkr_start_va;
129 129 uintptr_t hkr_end_va; /* zero means to end of memory */
130 130 } hat_kernel_range_t;
131 131 #define NUM_KERNEL_RANGE 2
132 132 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
133 133 static int num_kernel_ranges;
134 134
135 135 uint_t use_boot_reserve = 1; /* cleared after early boot process */
136 136 uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */
137 137
138 138 /*
139 139 * enable_1gpg: controls 1g page support for user applications.
140 140 * By default, 1g pages are exported to user applications. enable_1gpg can
141 141 * be set to 0 to not export.
142 142 */
143 143 int enable_1gpg = 1;
144 144
145 145 /*
146 146  * AMD shanghai processors provide better management of 1gb ptes in their tlbs.
147 147 * By default, 1g page support will be disabled for pre-shanghai AMD
148 148 * processors that don't have optimal tlb support for the 1g page size.
149 149 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
150 150 * processors.
151 151 */
152 152 int chk_optimal_1gtlb = 1;
153 153
154 154
155 155 #ifdef DEBUG
156 156 uint_t map1gcnt;
157 157 #endif
158 158
159 159
160 160 /*
161 161 * A cpuset for all cpus. This is used for kernel address cross calls, since
162 162 * the kernel addresses apply to all cpus.
163 163 */
164 164 cpuset_t khat_cpuset;
165 165
166 166 /*
167 167 * management stuff for hat structures
168 168 */
169 169 kmutex_t hat_list_lock;
170 170 kcondvar_t hat_list_cv;
171 171 kmem_cache_t *hat_cache;
172 172 kmem_cache_t *hat_hash_cache;
173 173 kmem_cache_t *vlp_hash_cache;
174 174
175 175 /*
176 176 * Simple statistics
177 177 */
178 178 struct hatstats hatstat;
179 179
180 180 /*
181 181 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
182 182 * correctly. For such hypervisors we must set PT_USER for kernel
183 183 * entries ourselves (normally the emulation would set PT_USER for
184 184 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is
185 185 * thus set appropriately. Note that dboot/kbm is OK, as only the full
186 186 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
187 187 * incorrect.
188 188 */
189 189 int pt_kern;
190 190
191 191 /*
192 192 * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
193 193 */
194 194 extern void atomic_orb(uchar_t *addr, uchar_t val);
195 195 extern void atomic_andb(uchar_t *addr, uchar_t val);
196 196
197 197 #ifndef __xpv
198 198 extern pfn_t memseg_get_start(struct memseg *);
199 199 #endif
200 200
201 201 #define PP_GETRM(pp, rmmask) (pp->p_nrm & rmmask)
202 202 #define PP_ISMOD(pp) PP_GETRM(pp, P_MOD)
203 203 #define PP_ISREF(pp) PP_GETRM(pp, P_REF)
204 204 #define PP_ISRO(pp) PP_GETRM(pp, P_RO)
205 205
206 206 #define PP_SETRM(pp, rm) atomic_orb(&(pp->p_nrm), rm)
207 207 #define PP_SETMOD(pp) PP_SETRM(pp, P_MOD)
208 208 #define PP_SETREF(pp) PP_SETRM(pp, P_REF)
209 209 #define PP_SETRO(pp) PP_SETRM(pp, P_RO)
210 210
211 211 #define PP_CLRRM(pp, rm) atomic_andb(&(pp->p_nrm), ~(rm))
212 212 #define PP_CLRMOD(pp) PP_CLRRM(pp, P_MOD)
213 213 #define PP_CLRREF(pp) PP_CLRRM(pp, P_REF)
214 214 #define PP_CLRRO(pp) PP_CLRRM(pp, P_RO)
215 215 #define PP_CLRALL(pp) PP_CLRRM(pp, P_MOD | P_REF | P_RO)
216 216
217 217 /*
218 218 * kmem cache constructor for struct hat
219 219 */
220 220 /*ARGSUSED*/
221 221 static int
222 222 hati_constructor(void *buf, void *handle, int kmflags)
223 223 {
224 224 hat_t *hat = buf;
225 225
226 226 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
227 227 bzero(hat->hat_pages_mapped,
228 228 sizeof (pgcnt_t) * (mmu.max_page_level + 1));
229 229 hat->hat_ism_pgcnt = 0;
230 230 hat->hat_stats = 0;
231 231 hat->hat_flags = 0;
232 232 CPUSET_ZERO(hat->hat_cpus);
233 233 hat->hat_htable = NULL;
234 234 hat->hat_ht_hash = NULL;
235 235 return (0);
236 236 }
237 237
238 238 /*
239 239 * Allocate a hat structure for as. We also create the top level
240 240 * htable and initialize it to contain the kernel hat entries.
241 241 */
242 242 hat_t *
243 243 hat_alloc(struct as *as)
244 244 {
245 245 hat_t *hat;
246 246 htable_t *ht; /* top level htable */
247 247 uint_t use_vlp;
248 248 uint_t r;
249 249 hat_kernel_range_t *rp;
250 250 uintptr_t va;
251 251 uintptr_t eva;
252 252 uint_t start;
253 253 uint_t cnt;
254 254 htable_t *src;
255 255
256 256 /*
257 257 * Once we start creating user process HATs we can enable
258 258 * the htable_steal() code.
259 259 */
260 260 if (can_steal_post_boot == 0)
261 261 can_steal_post_boot = 1;
262 262
263 - ASSERT(AS_WRITE_HELD(as, &as->a_lock));
263 + ASSERT(AS_WRITE_HELD(as));
264 264 hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
265 265 hat->hat_as = as;
266 266 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
267 267 ASSERT(hat->hat_flags == 0);
268 268
269 269 #if defined(__xpv)
270 270 /*
271 271 * No VLP stuff on the hypervisor due to the 64-bit split top level
272 272 * page tables. On 32-bit it's not needed as the hypervisor takes
273 273 * care of copying the top level PTEs to a below 4Gig page.
274 274 */
275 275 use_vlp = 0;
276 276 #else /* __xpv */
277 277 	/* 32 bit processes use a VLP style hat when running with PAE */
278 278 #if defined(__amd64)
279 279 use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
280 280 #elif defined(__i386)
281 281 use_vlp = mmu.pae_hat;
282 282 #endif
283 283 #endif /* __xpv */
284 284 if (use_vlp) {
285 285 hat->hat_flags = HAT_VLP;
286 286 bzero(hat->hat_vlp_ptes, VLP_SIZE);
287 287 }
288 288
289 289 /*
290 290 * Allocate the htable hash
291 291 */
292 292 if ((hat->hat_flags & HAT_VLP)) {
293 293 hat->hat_num_hash = mmu.vlp_hash_cnt;
294 294 hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
295 295 } else {
296 296 hat->hat_num_hash = mmu.hash_cnt;
297 297 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
298 298 }
299 299 bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
300 300
301 301 /*
302 302 * Initialize Kernel HAT entries at the top of the top level page
303 303 * tables for the new hat.
304 304 */
305 305 hat->hat_htable = NULL;
306 306 hat->hat_ht_cached = NULL;
307 307 XPV_DISALLOW_MIGRATE();
308 308 ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
309 309 hat->hat_htable = ht;
310 310
311 311 #if defined(__amd64)
312 312 if (hat->hat_flags & HAT_VLP)
313 313 goto init_done;
314 314 #endif
315 315
316 316 for (r = 0; r < num_kernel_ranges; ++r) {
317 317 rp = &kernel_ranges[r];
318 318 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
319 319 va += cnt * LEVEL_SIZE(rp->hkr_level)) {
320 320
321 321 if (rp->hkr_level == TOP_LEVEL(hat))
322 322 ht = hat->hat_htable;
323 323 else
324 324 ht = htable_create(hat, va, rp->hkr_level,
325 325 NULL);
326 326
327 327 start = htable_va2entry(va, ht);
328 328 cnt = HTABLE_NUM_PTES(ht) - start;
329 329 eva = va +
330 330 ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
331 331 if (rp->hkr_end_va != 0 &&
332 332 (eva > rp->hkr_end_va || eva == 0))
333 333 cnt = htable_va2entry(rp->hkr_end_va, ht) -
334 334 start;
335 335
336 336 #if defined(__i386) && !defined(__xpv)
337 337 if (ht->ht_flags & HTABLE_VLP) {
338 338 bcopy(&vlp_page[start],
339 339 &hat->hat_vlp_ptes[start],
340 340 cnt * sizeof (x86pte_t));
341 341 continue;
342 342 }
343 343 #endif
344 344 src = htable_lookup(kas.a_hat, va, rp->hkr_level);
345 345 ASSERT(src != NULL);
346 346 x86pte_copy(src, ht, start, cnt);
347 347 htable_release(src);
348 348 }
349 349 }
350 350
351 351 init_done:
352 352
353 353 #if defined(__xpv)
354 354 /*
355 355 * Pin top level page tables after initializing them
356 356 */
357 357 xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
358 358 #if defined(__amd64)
359 359 xen_pin(hat->hat_user_ptable, mmu.max_level);
360 360 #endif
361 361 #endif
362 362 XPV_ALLOW_MIGRATE();
363 363
364 364 /*
365 365 * Put it at the start of the global list of all hats (used by stealing)
366 366 *
367 367 * kas.a_hat is not in the list but is instead used to find the
368 368 * first and last items in the list.
369 369 *
370 370 * - kas.a_hat->hat_next points to the start of the user hats.
371 371 * The list ends where hat->hat_next == NULL
372 372 *
373 373 * - kas.a_hat->hat_prev points to the last of the user hats.
374 374 * The list begins where hat->hat_prev == NULL
375 375 */
376 376 mutex_enter(&hat_list_lock);
377 377 hat->hat_prev = NULL;
378 378 hat->hat_next = kas.a_hat->hat_next;
379 379 if (hat->hat_next)
380 380 hat->hat_next->hat_prev = hat;
381 381 else
382 382 kas.a_hat->hat_prev = hat;
383 383 kas.a_hat->hat_next = hat;
384 384 mutex_exit(&hat_list_lock);
385 385
386 386 return (hat);
387 387 }
388 388
389 389 /*
390 390 * process has finished executing but as has not been cleaned up yet.
391 391 */
392 392 /*ARGSUSED*/
393 393 void
394 394 hat_free_start(hat_t *hat)
395 395 {
396 - ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
396 + ASSERT(AS_WRITE_HELD(hat->hat_as));
397 397
398 398 /*
399 399 * If the hat is currently a stealing victim, wait for the stealing
400 400 * to finish. Once we mark it as HAT_FREEING, htable_steal()
401 401 * won't look at its pagetables anymore.
402 402 */
403 403 mutex_enter(&hat_list_lock);
404 404 while (hat->hat_flags & HAT_VICTIM)
405 405 cv_wait(&hat_list_cv, &hat_list_lock);
406 406 hat->hat_flags |= HAT_FREEING;
407 407 mutex_exit(&hat_list_lock);
408 408 }
409 409
410 410 /*
411 411 * An address space is being destroyed, so we destroy the associated hat.
412 412 */
413 413 void
414 414 hat_free_end(hat_t *hat)
415 415 {
416 416 kmem_cache_t *cache;
417 417
418 418 ASSERT(hat->hat_flags & HAT_FREEING);
419 419
420 420 /*
421 421 * must not be running on the given hat
422 422 */
423 423 ASSERT(CPU->cpu_current_hat != hat);
424 424
425 425 /*
426 426 * Remove it from the list of HATs
427 427 */
428 428 mutex_enter(&hat_list_lock);
429 429 if (hat->hat_prev)
430 430 hat->hat_prev->hat_next = hat->hat_next;
431 431 else
432 432 kas.a_hat->hat_next = hat->hat_next;
433 433 if (hat->hat_next)
434 434 hat->hat_next->hat_prev = hat->hat_prev;
435 435 else
436 436 kas.a_hat->hat_prev = hat->hat_prev;
437 437 mutex_exit(&hat_list_lock);
438 438 hat->hat_next = hat->hat_prev = NULL;
439 439
440 440 #if defined(__xpv)
441 441 /*
442 442 * On the hypervisor, unpin top level page table(s)
443 443 */
444 444 xen_unpin(hat->hat_htable->ht_pfn);
445 445 #if defined(__amd64)
446 446 xen_unpin(hat->hat_user_ptable);
447 447 #endif
448 448 #endif
449 449
450 450 /*
451 451 * Make a pass through the htables freeing them all up.
452 452 */
453 453 htable_purge_hat(hat);
454 454
455 455 /*
456 456 * Decide which kmem cache the hash table came from, then free it.
457 457 */
458 458 if (hat->hat_flags & HAT_VLP)
459 459 cache = vlp_hash_cache;
460 460 else
461 461 cache = hat_hash_cache;
462 462 kmem_cache_free(cache, hat->hat_ht_hash);
463 463 hat->hat_ht_hash = NULL;
464 464
465 465 hat->hat_flags = 0;
466 466 kmem_cache_free(hat_cache, hat);
467 467 }
468 468
469 469 /*
470 470 * round kernelbase down to a supported value to use for _userlimit
471 471 *
472 472 * userlimit must be aligned down to an entry in the top level htable.
473 473 * The one exception is for 32 bit HAT's running PAE.
474 474 */
475 475 uintptr_t
476 476 hat_kernelbase(uintptr_t va)
477 477 {
478 478 #if defined(__i386)
479 479 va &= LEVEL_MASK(1);
480 480 #endif
481 481 if (IN_VA_HOLE(va))
482 482 panic("_userlimit %p will fall in VA hole\n", (void *)va);
483 483 return (va);
484 484 }
485 485
486 486 /*
487 487 *
488 488 */
489 489 static void
490 490 set_max_page_level()
491 491 {
492 492 level_t lvl;
493 493
494 494 if (!kbm_largepage_support) {
495 495 lvl = 0;
496 496 } else {
497 497 if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
498 498 lvl = 2;
499 499 if (chk_optimal_1gtlb &&
500 500 cpuid_opteron_erratum(CPU, 6671130)) {
501 501 lvl = 1;
502 502 }
503 503 if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
504 504 LEVEL_SHIFT(0))) {
505 505 lvl = 1;
506 506 }
507 507 } else {
508 508 lvl = 1;
509 509 }
510 510 }
511 511 mmu.max_page_level = lvl;
512 512
513 513 if ((lvl == 2) && (enable_1gpg == 0))
514 514 mmu.umax_page_level = 1;
515 515 else
516 516 mmu.umax_page_level = lvl;
517 517 }
518 518
519 519 /*
520 520 * Initialize hat data structures based on processor MMU information.
521 521 */
522 522 void
523 523 mmu_init(void)
524 524 {
525 525 uint_t max_htables;
526 526 uint_t pa_bits;
527 527 uint_t va_bits;
528 528 int i;
529 529
530 530 /*
531 531 * If CPU enabled the page table global bit, use it for the kernel
532 532 * This is bit 7 in CR4 (PGE - Page Global Enable).
533 533 */
534 534 if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
535 535 (getcr4() & CR4_PGE) != 0)
536 536 mmu.pt_global = PT_GLOBAL;
537 537
538 538 /*
539 539 * Detect NX and PAE usage.
540 540 */
541 541 mmu.pae_hat = kbm_pae_support;
542 542 if (kbm_nx_support)
543 543 mmu.pt_nx = PT_NX;
544 544 else
545 545 mmu.pt_nx = 0;
546 546
547 547 /*
548 548 * Use CPU info to set various MMU parameters
549 549 */
550 550 cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
551 551
552 552 if (va_bits < sizeof (void *) * NBBY) {
553 553 mmu.hole_start = (1ul << (va_bits - 1));
554 554 mmu.hole_end = 0ul - mmu.hole_start - 1;
555 555 } else {
556 556 mmu.hole_end = 0;
557 557 mmu.hole_start = mmu.hole_end - 1;
558 558 }
559 559 #if defined(OPTERON_ERRATUM_121)
560 560 /*
561 561 * If erratum 121 has already been detected at this time, hole_start
562 562 * contains the value to be subtracted from mmu.hole_start.
563 563 */
564 564 ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
565 565 hole_start = mmu.hole_start - hole_start;
566 566 #else
567 567 hole_start = mmu.hole_start;
568 568 #endif
569 569 hole_end = mmu.hole_end;
570 570
571 571 mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
572 572 if (mmu.pae_hat == 0 && pa_bits > 32)
573 573 mmu.highest_pfn = PFN_4G - 1;
574 574
575 575 if (mmu.pae_hat) {
576 576 mmu.pte_size = 8; /* 8 byte PTEs */
577 577 mmu.pte_size_shift = 3;
578 578 } else {
579 579 mmu.pte_size = 4; /* 4 byte PTEs */
580 580 mmu.pte_size_shift = 2;
581 581 }
582 582
583 583 if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
584 584 panic("Processor does not support PAE");
585 585
586 586 if (!is_x86_feature(x86_featureset, X86FSET_CX8))
587 587 panic("Processor does not support cmpxchg8b instruction");
588 588
589 589 #if defined(__amd64)
590 590
591 591 mmu.num_level = 4;
592 592 mmu.max_level = 3;
593 593 mmu.ptes_per_table = 512;
594 594 mmu.top_level_count = 512;
595 595
596 596 mmu.level_shift[0] = 12;
597 597 mmu.level_shift[1] = 21;
598 598 mmu.level_shift[2] = 30;
599 599 mmu.level_shift[3] = 39;
600 600
601 601 #elif defined(__i386)
602 602
603 603 if (mmu.pae_hat) {
604 604 mmu.num_level = 3;
605 605 mmu.max_level = 2;
606 606 mmu.ptes_per_table = 512;
607 607 mmu.top_level_count = 4;
608 608
609 609 mmu.level_shift[0] = 12;
610 610 mmu.level_shift[1] = 21;
611 611 mmu.level_shift[2] = 30;
612 612
613 613 } else {
614 614 mmu.num_level = 2;
615 615 mmu.max_level = 1;
616 616 mmu.ptes_per_table = 1024;
617 617 mmu.top_level_count = 1024;
618 618
619 619 mmu.level_shift[0] = 12;
620 620 mmu.level_shift[1] = 22;
621 621 }
622 622
623 623 #endif /* __i386 */
624 624
625 625 for (i = 0; i < mmu.num_level; ++i) {
626 626 mmu.level_size[i] = 1UL << mmu.level_shift[i];
627 627 mmu.level_offset[i] = mmu.level_size[i] - 1;
628 628 mmu.level_mask[i] = ~mmu.level_offset[i];
629 629 }
630 630
631 631 set_max_page_level();
632 632
633 633 mmu_page_sizes = mmu.max_page_level + 1;
634 634 mmu_exported_page_sizes = mmu.umax_page_level + 1;
635 635
636 636 /* restrict legacy applications from using pagesizes 1g and above */
637 637 mmu_legacy_page_sizes =
638 638 (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
639 639
640 640
641 641 for (i = 0; i <= mmu.max_page_level; ++i) {
642 642 mmu.pte_bits[i] = PT_VALID | pt_kern;
643 643 if (i > 0)
644 644 mmu.pte_bits[i] |= PT_PAGESIZE;
645 645 }
646 646
647 647 /*
648 648 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
649 649 */
650 650 for (i = 1; i < mmu.num_level; ++i)
651 651 mmu.ptp_bits[i] = PT_PTPBITS;
652 652
653 653 #if defined(__i386)
654 654 mmu.ptp_bits[2] = PT_VALID;
655 655 #endif
656 656
657 657 /*
658 658 * Compute how many hash table entries to have per process for htables.
659 659 * We start with 1 page's worth of entries.
660 660 *
661 661  * If physical memory is small, reduce the amount needed to cover it.
662 662 */
663 663 max_htables = physmax / mmu.ptes_per_table;
664 664 mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
665 665 while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
666 666 mmu.hash_cnt >>= 1;
667 667 mmu.vlp_hash_cnt = mmu.hash_cnt;
668 668
669 669 #if defined(__amd64)
670 670 /*
671 671 * If running in 64 bits and physical memory is large,
672 672 * increase the size of the cache to cover all of memory for
673 673 * a 64 bit process.
674 674 */
675 675 #define HASH_MAX_LENGTH 4
676 676 while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
677 677 mmu.hash_cnt <<= 1;
678 678 #endif
679 679 }
680 680
681 681
682 682 /*
683 683 * initialize hat data structures
684 684 */
685 685 void
686 686 hat_init()
687 687 {
688 688 #if defined(__i386)
689 689 /*
690 690 * _userlimit must be aligned correctly
691 691 */
692 692 if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
693 693 prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
694 694 (void *)_userlimit, (void *)LEVEL_SIZE(1));
695 695 halt("hat_init(): Unable to continue");
696 696 }
697 697 #endif
698 698
699 699 cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
700 700
701 701 /*
702 702 * initialize kmem caches
703 703 */
704 704 htable_init();
705 705 hment_init();
706 706
707 707 hat_cache = kmem_cache_create("hat_t",
708 708 sizeof (hat_t), 0, hati_constructor, NULL, NULL,
709 709 NULL, 0, 0);
710 710
711 711 hat_hash_cache = kmem_cache_create("HatHash",
712 712 mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
713 713 NULL, 0, 0);
714 714
715 715 /*
716 716 	 * VLP hats can use a smaller hash table size on large memory machines
717 717 */
718 718 if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
719 719 vlp_hash_cache = hat_hash_cache;
720 720 } else {
721 721 vlp_hash_cache = kmem_cache_create("HatVlpHash",
722 722 mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
723 723 NULL, 0, 0);
724 724 }
725 725
726 726 /*
727 727 * Set up the kernel's hat
728 728 */
729 - AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
729 + AS_LOCK_ENTER(&kas, RW_WRITER);
730 730 kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
731 731 mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
732 732 kas.a_hat->hat_as = &kas;
733 733 kas.a_hat->hat_flags = 0;
734 - AS_LOCK_EXIT(&kas, &kas.a_lock);
734 + AS_LOCK_EXIT(&kas);
735 735
736 736 CPUSET_ZERO(khat_cpuset);
737 737 CPUSET_ADD(khat_cpuset, CPU->cpu_id);
738 738
739 739 /*
740 740 	 * The kernel hat's next pointer serves as the head of the hat list.
741 741 * The kernel hat's prev pointer tracks the last hat on the list for
742 742 * htable_steal() to use.
743 743 */
744 744 kas.a_hat->hat_next = NULL;
745 745 kas.a_hat->hat_prev = NULL;
746 746
747 747 /*
748 748 * Allocate an htable hash bucket for the kernel
749 749 * XX64 - tune for 64 bit procs
750 750 */
751 751 kas.a_hat->hat_num_hash = mmu.hash_cnt;
752 752 kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
753 753 bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
754 754
755 755 /*
756 756 * zero out the top level and cached htable pointers
757 757 */
758 758 kas.a_hat->hat_ht_cached = NULL;
759 759 kas.a_hat->hat_htable = NULL;
760 760
761 761 /*
762 762 * Pre-allocate hrm_hashtab before enabling the collection of
763 763 * refmod statistics. Allocating on the fly would mean us
764 764 * running the risk of suffering recursive mutex enters or
765 765 * deadlocks.
766 766 */
767 767 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
768 768 KM_SLEEP);
769 769 }
770 770
771 771 /*
772 772 * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
773 773 *
774 774 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
775 775 * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
776 776 * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
777 777 */
778 778 /*ARGSUSED*/
779 779 static void
780 780 hat_vlp_setup(struct cpu *cpu)
781 781 {
782 782 #if defined(__amd64) && !defined(__xpv)
783 783 struct hat_cpu_info *hci = cpu->cpu_hat_info;
784 784 pfn_t pfn;
785 785
786 786 /*
787 787 * allocate the level==2 page table for the bottom most
788 788 * 512Gig of address space (this is where 32 bit apps live)
789 789 */
790 790 ASSERT(hci != NULL);
791 791 hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
792 792
793 793 /*
794 794 * Allocate a top level pagetable and copy the kernel's
795 795 * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
796 796 */
797 797 hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
798 798 hci->hci_vlp_pfn =
799 799 hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
800 800 ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
801 801 bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);
802 802
803 803 pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
804 804 ASSERT(pfn != PFN_INVALID);
805 805 hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
806 806 #endif /* __amd64 && !__xpv */
807 807 }
808 808
809 809 /*ARGSUSED*/
810 810 static void
811 811 hat_vlp_teardown(cpu_t *cpu)
812 812 {
813 813 #if defined(__amd64) && !defined(__xpv)
814 814 struct hat_cpu_info *hci;
815 815
816 816 if ((hci = cpu->cpu_hat_info) == NULL)
817 817 return;
818 818 if (hci->hci_vlp_l2ptes)
819 819 kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
820 820 if (hci->hci_vlp_l3ptes)
821 821 kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
822 822 #endif
823 823 }
824 824
825 825 #define NEXT_HKR(r, l, s, e) { \
826 826 kernel_ranges[r].hkr_level = l; \
827 827 kernel_ranges[r].hkr_start_va = s; \
828 828 kernel_ranges[r].hkr_end_va = e; \
829 829 ++r; \
830 830 }
831 831
832 832 /*
833 833 * Finish filling in the kernel hat.
834 834 * Pre fill in all top level kernel page table entries for the kernel's
835 835 * part of the address range. From this point on we can't use any new
836 836 * kernel large pages if they need PTE's at max_level
837 837 *
838 838 * create the kmap mappings.
839 839 */
840 840 void
841 841 hat_init_finish(void)
842 842 {
843 843 size_t size;
844 844 uint_t r = 0;
845 845 uintptr_t va;
846 846 hat_kernel_range_t *rp;
847 847
848 848
849 849 /*
850 850 * We are now effectively running on the kernel hat.
851 851 * Clearing use_boot_reserve shuts off using the pre-allocated boot
852 852 * reserve for all HAT allocations. From here on, the reserves are
853 853 * only used when avoiding recursion in kmem_alloc().
854 854 */
855 855 use_boot_reserve = 0;
856 856 htable_adjust_reserve();
857 857
858 858 /*
859 859 * User HATs are initialized with copies of all kernel mappings in
860 860 * higher level page tables. Ensure that those entries exist.
861 861 */
862 862 #if defined(__amd64)
863 863
864 864 NEXT_HKR(r, 3, kernelbase, 0);
865 865 #if defined(__xpv)
866 866 NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
867 867 #endif
868 868
869 869 #elif defined(__i386)
870 870
871 871 #if !defined(__xpv)
872 872 if (mmu.pae_hat) {
873 873 va = kernelbase;
874 874 if ((va & LEVEL_MASK(2)) != va) {
875 875 va = P2ROUNDUP(va, LEVEL_SIZE(2));
876 876 NEXT_HKR(r, 1, kernelbase, va);
877 877 }
878 878 if (va != 0)
879 879 NEXT_HKR(r, 2, va, 0);
880 880 } else
881 881 #endif /* __xpv */
882 882 NEXT_HKR(r, 1, kernelbase, 0);
883 883
884 884 #endif /* __i386 */
885 885
886 886 num_kernel_ranges = r;
887 887
888 888 /*
889 889 * Create all the kernel pagetables that will have entries
890 890 * shared to user HATs.
891 891 */
892 892 for (r = 0; r < num_kernel_ranges; ++r) {
893 893 rp = &kernel_ranges[r];
894 894 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
895 895 va += LEVEL_SIZE(rp->hkr_level)) {
896 896 htable_t *ht;
897 897
898 898 if (IN_HYPERVISOR_VA(va))
899 899 continue;
900 900
901 901 /* can/must skip if a page mapping already exists */
902 902 if (rp->hkr_level <= mmu.max_page_level &&
903 903 (ht = htable_getpage(kas.a_hat, va, NULL)) !=
904 904 NULL) {
905 905 htable_release(ht);
906 906 continue;
907 907 }
908 908
909 909 (void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
910 910 NULL);
911 911 }
912 912 }
913 913
914 914 /*
915 915 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
916 916 * page holding the top level pagetable. We use the remainder for
917 917 * the "per CPU" page tables for VLP processes.
918 918 * Map the top level kernel pagetable into the kernel to make
919 919 	 * it easy to use bcopy to access these tables.
920 920 */
921 921 if (mmu.pae_hat) {
922 922 vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
923 923 hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
924 924 kas.a_hat->hat_htable->ht_pfn,
925 925 #if !defined(__xpv)
926 926 PROT_WRITE |
927 927 #endif
928 928 PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
929 929 HAT_LOAD | HAT_LOAD_NOCONSIST);
930 930 }
931 931 hat_vlp_setup(CPU);
932 932
933 933 /*
934 934 * Create kmap (cached mappings of kernel PTEs)
935 935 * for 32 bit we map from segmap_start .. ekernelheap
936 936 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
937 937 */
938 938 #if defined(__i386)
939 939 size = (uintptr_t)ekernelheap - segmap_start;
940 940 #elif defined(__amd64)
941 941 size = segmapsize;
942 942 #endif
943 943 hat_kmap_init((uintptr_t)segmap_start, size);
944 944 }
945 945
946 946 /*
947 947 * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
948 948 * are 32 bit, so for safety we must use atomic_cas_64() to install these.
949 949 */
950 950 #ifdef __i386
951 951 static void
952 952 reload_pae32(hat_t *hat, cpu_t *cpu)
953 953 {
954 954 x86pte_t *src;
955 955 x86pte_t *dest;
956 956 x86pte_t pte;
957 957 int i;
958 958
959 959 /*
960 960 * Load the 4 entries of the level 2 page table into this
961 961 * cpu's range of the vlp_page and point cr3 at them.
962 962 */
963 963 ASSERT(mmu.pae_hat);
964 964 src = hat->hat_vlp_ptes;
965 965 dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
966 966 for (i = 0; i < VLP_NUM_PTES; ++i) {
967 967 for (;;) {
968 968 pte = dest[i];
969 969 if (pte == src[i])
970 970 break;
971 971 if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
972 972 break;
973 973 }
974 974 }
975 975 }
976 976 #endif
977 977
978 978 /*
979 979 * Switch to a new active hat, maintaining bit masks to track active CPUs.
980 980 *
981 981 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
982 982 * remains a 32-bit value.
983 983 */
984 984 void
985 985 hat_switch(hat_t *hat)
986 986 {
987 987 uint64_t newcr3;
988 988 cpu_t *cpu = CPU;
989 989 hat_t *old = cpu->cpu_current_hat;
990 990
991 991 /*
992 992 * set up this information first, so we don't miss any cross calls
993 993 */
994 994 if (old != NULL) {
995 995 if (old == hat)
996 996 return;
997 997 if (old != kas.a_hat)
998 998 CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
999 999 }
1000 1000
1001 1001 /*
1002 1002 * Add this CPU to the active set for this HAT.
1003 1003 */
1004 1004 if (hat != kas.a_hat) {
1005 1005 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
1006 1006 }
1007 1007 cpu->cpu_current_hat = hat;
1008 1008
1009 1009 /*
1010 1010 * now go ahead and load cr3
1011 1011 */
1012 1012 if (hat->hat_flags & HAT_VLP) {
1013 1013 #if defined(__amd64)
1014 1014 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1015 1015
1016 1016 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1017 1017 newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
1018 1018 #elif defined(__i386)
1019 1019 reload_pae32(hat, cpu);
1020 1020 newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
1021 1021 (cpu->cpu_id + 1) * VLP_SIZE;
1022 1022 #endif
1023 1023 } else {
1024 1024 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
1025 1025 }
1026 1026 #ifdef __xpv
1027 1027 {
1028 1028 struct mmuext_op t[2];
1029 1029 uint_t retcnt;
1030 1030 uint_t opcnt = 1;
1031 1031
1032 1032 t[0].cmd = MMUEXT_NEW_BASEPTR;
1033 1033 t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1034 1034 #if defined(__amd64)
1035 1035 /*
1036 1036 * There's an interesting problem here, as to what to
1037 1037 * actually specify when switching to the kernel hat.
1038 1038 * For now we'll reuse the kernel hat again.
1039 1039 */
1040 1040 t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
1041 1041 if (hat == kas.a_hat)
1042 1042 t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1043 1043 else
1044 1044 t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
1045 1045 ++opcnt;
1046 1046 #endif /* __amd64 */
1047 1047 if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
1048 1048 panic("HYPERVISOR_mmu_update() failed");
1049 1049 ASSERT(retcnt == opcnt);
1050 1050
1051 1051 }
1052 1052 #else
1053 1053 setcr3(newcr3);
1054 1054 #endif
1055 1055 ASSERT(cpu == CPU);
1056 1056 }
1057 1057
1058 1058 /*
1059 1059 * Utility to return a valid x86pte_t from protections, pfn, and level number
1060 1060 */
1061 1061 static x86pte_t
1062 1062 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
1063 1063 {
1064 1064 x86pte_t pte;
1065 1065 uint_t cache_attr = attr & HAT_ORDER_MASK;
1066 1066
1067 1067 pte = MAKEPTE(pfn, level);
1068 1068
1069 1069 if (attr & PROT_WRITE)
1070 1070 PTE_SET(pte, PT_WRITABLE);
1071 1071
1072 1072 if (attr & PROT_USER)
1073 1073 PTE_SET(pte, PT_USER);
1074 1074
1075 1075 if (!(attr & PROT_EXEC))
1076 1076 PTE_SET(pte, mmu.pt_nx);
1077 1077
1078 1078 /*
1079 1079 	 * Set the software bits used to track ref/mod sync's and hments.
1080 1080 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
1081 1081 */
1082 1082 if (flags & HAT_LOAD_NOCONSIST)
1083 1083 PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
1084 1084 else if (attr & HAT_NOSYNC)
1085 1085 PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
1086 1086
1087 1087 /*
1088 1088 * Set the caching attributes in the PTE. The combination
1089 1089 * of attributes are poorly defined, so we pay attention
1090 1090 * to them in the given order.
1091 1091 *
1092 1092 * The test for HAT_STRICTORDER is different because it's defined
1093 1093 * as "0" - which was a stupid thing to do, but is too late to change!
1094 1094 */
1095 1095 if (cache_attr == HAT_STRICTORDER) {
1096 1096 PTE_SET(pte, PT_NOCACHE);
1097 1097 /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
1098 1098 } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
1099 1099 /* nothing to set */;
1100 1100 } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
1101 1101 PTE_SET(pte, PT_NOCACHE);
1102 1102 if (is_x86_feature(x86_featureset, X86FSET_PAT))
1103 1103 PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
1104 1104 else
1105 1105 PTE_SET(pte, PT_WRITETHRU);
1106 1106 } else {
1107 1107 panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
1108 1108 }
1109 1109
1110 1110 return (pte);
1111 1111 }
1112 1112
1113 1113 /*
1114 1114 * Duplicate address translations of the parent to the child.
1115 1115 * This function really isn't used anymore.
1116 1116 */
1117 1117 /*ARGSUSED*/
1118 1118 int
1119 1119 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1120 1120 {
1121 1121 ASSERT((uintptr_t)addr < kernelbase);
1122 1122 ASSERT(new != kas.a_hat);
1123 1123 ASSERT(old != kas.a_hat);
1124 1124 return (0);
1125 1125 }
1126 1126
1127 1127 /*
1128 1128 * Allocate any hat resources required for a process being swapped in.
1129 1129 */
1130 1130 /*ARGSUSED*/
1131 1131 void
1132 1132 hat_swapin(hat_t *hat)
1133 1133 {
1134 1134 /* do nothing - we let everything fault back in */
1135 1135 }
1136 1136
1137 1137 /*
1138 1138 * Unload all translations associated with an address space of a process
1139 1139 * that is being swapped out.
1140 1140 */
1141 1141 void
1142 1142 hat_swapout(hat_t *hat)
1143 1143 {
1144 1144 uintptr_t vaddr = (uintptr_t)0;
1145 1145 uintptr_t eaddr = _userlimit;
1146 1146 htable_t *ht = NULL;
1147 1147 level_t l;
1148 1148
1149 1149 XPV_DISALLOW_MIGRATE();
1150 1150 /*
1151 1151 * We can't just call hat_unload(hat, 0, _userlimit...) here, because
1152 1152 * seg_spt and shared pagetables can't be swapped out.
1153 1153 * Take a look at segspt_shmswapout() - it's a big no-op.
1154 1154 *
1155 1155 * Instead we'll walk through all the address space and unload
1156 1156 * any mappings which we are sure are not shared, not locked.
1157 1157 */
1158 1158 ASSERT(IS_PAGEALIGNED(vaddr));
1159 1159 ASSERT(IS_PAGEALIGNED(eaddr));
1160 - ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1160 + ASSERT(AS_LOCK_HELD(hat->hat_as));
1161 1161 if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1162 1162 eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1163 1163
1164 1164 while (vaddr < eaddr) {
1165 1165 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1166 1166 if (ht == NULL)
1167 1167 break;
1168 1168
1169 1169 ASSERT(!IN_VA_HOLE(vaddr));
1170 1170
1171 1171 /*
1172 1172 * If the page table is shared skip its entire range.
1173 1173 */
1174 1174 l = ht->ht_level;
1175 1175 if (ht->ht_flags & HTABLE_SHARED_PFN) {
1176 1176 vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
1177 1177 htable_release(ht);
1178 1178 ht = NULL;
1179 1179 continue;
1180 1180 }
1181 1181
1182 1182 /*
1183 1183 * If the page table has no locked entries, unload this one.
1184 1184 */
1185 1185 if (ht->ht_lock_cnt == 0)
1186 1186 hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1187 1187 HAT_UNLOAD_UNMAP);
1188 1188
1189 1189 /*
1190 1190 * If we have a level 0 page table with locked entries,
1191 1191 * skip the entire page table, otherwise skip just one entry.
1192 1192 */
1193 1193 if (ht->ht_lock_cnt > 0 && l == 0)
1194 1194 vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1195 1195 else
1196 1196 vaddr += LEVEL_SIZE(l);
1197 1197 }
1198 1198 if (ht)
1199 1199 htable_release(ht);
1200 1200
1201 1201 /*
1202 1202 * We're in swapout because the system is low on memory, so
1203 1203 * go back and flush all the htables off the cached list.
1204 1204 */
1205 1205 htable_purge_hat(hat);
1206 1206 XPV_ALLOW_MIGRATE();
1207 1207 }
1208 1208
1209 1209 /*
1210 1210 * returns number of bytes that have valid mappings in hat.
1211 1211 */
1212 1212 size_t
1213 1213 hat_get_mapped_size(hat_t *hat)
1214 1214 {
1215 1215 size_t total = 0;
1216 1216 int l;
1217 1217
1218 1218 for (l = 0; l <= mmu.max_page_level; l++)
1219 1219 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1220 1220 total += hat->hat_ism_pgcnt;
1221 1221
1222 1222 return (total);
1223 1223 }
1224 1224
1225 1225 /*
1226 1226 * enable/disable collection of stats for hat.
1227 1227 */
1228 1228 int
1229 1229 hat_stats_enable(hat_t *hat)
1230 1230 {
1231 1231 atomic_inc_32(&hat->hat_stats);
1232 1232 return (1);
1233 1233 }
1234 1234
1235 1235 void
1236 1236 hat_stats_disable(hat_t *hat)
1237 1237 {
1238 1238 atomic_dec_32(&hat->hat_stats);
1239 1239 }
1240 1240
1241 1241 /*
1242 1242 * Utility to sync the ref/mod bits from a page table entry to the page_t
1243 1243 * We must be holding the mapping list lock when this is called.
1244 1244 */
1245 1245 static void
1246 1246 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1247 1247 {
1248 1248 uint_t rm = 0;
1249 1249 pgcnt_t pgcnt;
1250 1250
1251 1251 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1252 1252 return;
1253 1253
1254 1254 if (PTE_GET(pte, PT_REF))
1255 1255 rm |= P_REF;
1256 1256
1257 1257 if (PTE_GET(pte, PT_MOD))
1258 1258 rm |= P_MOD;
1259 1259
1260 1260 if (rm == 0)
1261 1261 return;
1262 1262
1263 1263 /*
1264 1264 * sync to all constituent pages of a large page
1265 1265 */
1266 1266 ASSERT(x86_hm_held(pp));
1267 1267 pgcnt = page_get_pagecnt(level);
1268 1268 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1269 1269 for (; pgcnt > 0; --pgcnt) {
1270 1270 /*
1271 1271 * hat_page_demote() can't decrease
1272 1272 * pszc below this mapping size
1273 1273 * since this large mapping existed after we
1274 1274 * took mlist lock.
1275 1275 */
1276 1276 ASSERT(pp->p_szc >= level);
1277 1277 hat_page_setattr(pp, rm);
1278 1278 ++pp;
1279 1279 }
1280 1280 }
1281 1281
1282 1282 /*
1283 1283 * This the set of PTE bits for PFN, permissions and caching
1284 1284 * that are allowed to change on a HAT_LOAD_REMAP
1285 1285 */
1286 1286 #define PT_REMAP_BITS \
1287 1287 (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \
1288 1288 PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
1289 1289
1290 1290 #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
1291 1291 /*
1292 1292 * Do the low-level work to get a mapping entered into a HAT's pagetables
1293 1293 * and in the mapping list of the associated page_t.
1294 1294 */
1295 1295 static int
1296 1296 hati_pte_map(
1297 1297 htable_t *ht,
1298 1298 uint_t entry,
1299 1299 page_t *pp,
1300 1300 x86pte_t pte,
1301 1301 int flags,
1302 1302 void *pte_ptr)
1303 1303 {
1304 1304 hat_t *hat = ht->ht_hat;
1305 1305 x86pte_t old_pte;
1306 1306 level_t l = ht->ht_level;
1307 1307 hment_t *hm;
1308 1308 uint_t is_consist;
1309 1309 uint_t is_locked;
1310 1310 int rv = 0;
1311 1311
1312 1312 /*
1313 1313 * Is this a consistent (ie. need mapping list lock) mapping?
1314 1314 */
1315 1315 is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1316 1316
1317 1317 /*
1318 1318 * Track locked mapping count in the htable. Do this first,
1319 1319 * as we track locking even if there already is a mapping present.
1320 1320 */
1321 1321 is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
1322 1322 if (is_locked)
1323 1323 HTABLE_LOCK_INC(ht);
1324 1324
1325 1325 /*
1326 1326 * Acquire the page's mapping list lock and get an hment to use.
1327 1327 * Note that hment_prepare() might return NULL.
1328 1328 */
1329 1329 if (is_consist) {
1330 1330 x86_hm_enter(pp);
1331 1331 hm = hment_prepare(ht, entry, pp);
1332 1332 }
1333 1333
1334 1334 /*
1335 1335 * Set the new pte, retrieving the old one at the same time.
1336 1336 */
1337 1337 old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1338 1338
1339 1339 /*
1340 1340 * Did we get a large page / page table collision?
1341 1341 */
1342 1342 if (old_pte == LPAGE_ERROR) {
1343 1343 if (is_locked)
1344 1344 HTABLE_LOCK_DEC(ht);
1345 1345 rv = -1;
1346 1346 goto done;
1347 1347 }
1348 1348
1349 1349 /*
1350 1350 * If the mapping didn't change there is nothing more to do.
1351 1351 */
1352 1352 if (PTE_EQUIV(pte, old_pte))
1353 1353 goto done;
1354 1354
1355 1355 /*
1356 1356 * Install a new mapping in the page's mapping list
1357 1357 */
1358 1358 if (!PTE_ISVALID(old_pte)) {
1359 1359 if (is_consist) {
1360 1360 hment_assign(ht, entry, pp, hm);
1361 1361 x86_hm_exit(pp);
1362 1362 } else {
1363 1363 ASSERT(flags & HAT_LOAD_NOCONSIST);
1364 1364 }
1365 1365 #if defined(__amd64)
1366 1366 if (ht->ht_flags & HTABLE_VLP) {
1367 1367 cpu_t *cpu = CPU;
1368 1368 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1369 1369 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1370 1370 }
1371 1371 #endif
1372 1372 HTABLE_INC(ht->ht_valid_cnt);
1373 1373 PGCNT_INC(hat, l);
1374 1374 return (rv);
1375 1375 }
1376 1376
1377 1377 /*
1378 1378 * Remap's are more complicated:
1379 1379 * - HAT_LOAD_REMAP must be specified if changing the pfn.
1380 1380 * We also require that NOCONSIST be specified.
1381 1381 * - Otherwise only permission or caching bits may change.
1382 1382 */
1383 1383 if (!PTE_ISPAGE(old_pte, l))
1384 1384 panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1385 1385
1386 1386 if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1387 1387 REMAPASSERT(flags & HAT_LOAD_REMAP);
1388 1388 REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1389 1389 REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1390 1390 REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1391 1391 pf_is_memory(PTE2PFN(pte, l)));
1392 1392 REMAPASSERT(!is_consist);
1393 1393 }
1394 1394
1395 1395 /*
1396 1396 * We only let remaps change the certain bits in the PTE.
1397 1397 */
1398 1398 if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1399 1399 panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1400 1400 old_pte, pte);
1401 1401
1402 1402 /*
1403 1403 * We don't create any mapping list entries on a remap, so release
1404 1404 * any allocated hment after we drop the mapping list lock.
1405 1405 */
1406 1406 done:
1407 1407 if (is_consist) {
1408 1408 x86_hm_exit(pp);
1409 1409 if (hm != NULL)
1410 1410 hment_free(hm);
1411 1411 }
1412 1412 return (rv);
1413 1413 }
1414 1414
1415 1415 /*
1416 1416 * Internal routine to load a single page table entry. This only fails if
1417 1417 * we attempt to overwrite a page table link with a large page.
1418 1418 */
1419 1419 static int
1420 1420 hati_load_common(
1421 1421 hat_t *hat,
1422 1422 uintptr_t va,
1423 1423 page_t *pp,
1424 1424 uint_t attr,
1425 1425 uint_t flags,
1426 1426 level_t level,
1427 1427 pfn_t pfn)
1428 1428 {
1429 1429 htable_t *ht;
1430 1430 uint_t entry;
1431 1431 x86pte_t pte;
1432 1432 int rv = 0;
1433 1433
1434 1434 /*
1435 1435 * The number 16 is arbitrary and here to catch a recursion problem
1436 1436 * early before we blow out the kernel stack.
1437 1437 */
1438 1438 ++curthread->t_hatdepth;
1439 1439 ASSERT(curthread->t_hatdepth < 16);
1440 1440
1441 - ASSERT(hat == kas.a_hat ||
1442 - AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1441 + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1443 1442
1444 1443 if (flags & HAT_LOAD_SHARE)
1445 1444 hat->hat_flags |= HAT_SHARED;
1446 1445
1447 1446 /*
1448 1447 * Find the page table that maps this page if it already exists.
1449 1448 */
1450 1449 ht = htable_lookup(hat, va, level);
1451 1450
1452 1451 /*
1453 1452 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1454 1453 */
1455 1454 if (pp == NULL)
1456 1455 flags |= HAT_LOAD_NOCONSIST;
1457 1456
1458 1457 if (ht == NULL) {
1459 1458 ht = htable_create(hat, va, level, NULL);
1460 1459 ASSERT(ht != NULL);
1461 1460 }
1462 1461 entry = htable_va2entry(va, ht);
1463 1462
1464 1463 /*
1465 1464 * a bunch of paranoid error checking
1466 1465 */
1467 1466 ASSERT(ht->ht_busy > 0);
1468 1467 if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1469 1468 panic("hati_load_common: bad htable %p, va %p",
1470 1469 (void *)ht, (void *)va);
1471 1470 ASSERT(ht->ht_level == level);
1472 1471
1473 1472 /*
1474 1473 * construct the new PTE
1475 1474 */
1476 1475 if (hat == kas.a_hat)
1477 1476 attr &= ~PROT_USER;
1478 1477 pte = hati_mkpte(pfn, attr, level, flags);
1479 1478 if (hat == kas.a_hat && va >= kernelbase)
1480 1479 PTE_SET(pte, mmu.pt_global);
1481 1480
1482 1481 /*
1483 1482 * establish the mapping
1484 1483 */
1485 1484 rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
1486 1485
1487 1486 /*
1488 1487 * release the htable and any reserves
1489 1488 */
1490 1489 htable_release(ht);
1491 1490 --curthread->t_hatdepth;
1492 1491 return (rv);
1493 1492 }
1494 1493
1495 1494 /*
1496 1495 * special case of hat_memload to deal with some kernel addrs for performance
1497 1496 */
1498 1497 static void
1499 1498 hat_kmap_load(
1500 1499 caddr_t addr,
1501 1500 page_t *pp,
1502 1501 uint_t attr,
1503 1502 uint_t flags)
1504 1503 {
1505 1504 uintptr_t va = (uintptr_t)addr;
1506 1505 x86pte_t pte;
1507 1506 pfn_t pfn = page_pptonum(pp);
1508 1507 pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr);
1509 1508 htable_t *ht;
1510 1509 uint_t entry;
1511 1510 void *pte_ptr;
1512 1511
1513 1512 /*
1514 1513 * construct the requested PTE
1515 1514 */
1516 1515 attr &= ~PROT_USER;
1517 1516 attr |= HAT_STORECACHING_OK;
1518 1517 pte = hati_mkpte(pfn, attr, 0, flags);
1519 1518 PTE_SET(pte, mmu.pt_global);
1520 1519
1521 1520 /*
1522 1521 * Figure out the pte_ptr and htable and use common code to finish up
1523 1522 */
1524 1523 if (mmu.pae_hat)
1525 1524 pte_ptr = mmu.kmap_ptes + pg_off;
1526 1525 else
1527 1526 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
1528 1527 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1529 1528 LEVEL_SHIFT(1)];
1530 1529 entry = htable_va2entry(va, ht);
1531 1530 ++curthread->t_hatdepth;
1532 1531 ASSERT(curthread->t_hatdepth < 16);
1533 1532 (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1534 1533 --curthread->t_hatdepth;
1535 1534 }
1536 1535
1537 1536 /*
1538 1537 * hat_memload() - load a translation to the given page struct
1539 1538 *
1540 1539 * Flags for hat_memload/hat_devload/hat_*attr.
1541 1540 *
1542 1541 * HAT_LOAD Default flags to load a translation to the page.
1543 1542 *
1544 1543 * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(),
1545 1544 * and hat_devload().
1546 1545 *
1547 1546 * HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1548 1547 * sets PT_NOCONSIST
1549 1548 *
1550 1549 * HAT_LOAD_SHARE A flag to hat_memload() to indicate h/w page tables
1551 1550 * that map some user pages (not kas) is shared by more
1552 1551 * than one process (eg. ISM).
1553 1552 *
1554 1553 * HAT_LOAD_REMAP Reload a valid pte with a different page frame.
1555 1554 *
1556 1555 * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this
1557 1556 * point, it's setting up mapping to allocate internal
1558 1557 * hat layer data structures. This flag forces hat layer
1559 1558 * to tap its reserves in order to prevent infinite
1560 1559 * recursion.
1561 1560 *
1562 1561 * The following is a protection attribute (like PROT_READ, etc.)
1563 1562 *
1564 1563 * HAT_NOSYNC set PT_NOSYNC - this mapping's ref/mod bits
1565 1564 * are never cleared.
1566 1565 *
1567 1566 * Installing new valid PTE's and creation of the mapping list
1568 1567 * entry are controlled under the same lock. It's derived from the
1569 1568 * page_t being mapped.
1570 1569 */
1571 1570 static uint_t supported_memload_flags =
1572 1571 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1573 1572 HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
1574 1573
1575 1574 void
1576 1575 hat_memload(
1577 1576 hat_t *hat,
1578 1577 caddr_t addr,
1579 1578 page_t *pp,
1580 1579 uint_t attr,
1581 1580 uint_t flags)
1582 1581 {
1583 1582 uintptr_t va = (uintptr_t)addr;
1584 1583 level_t level = 0;
1585 1584 pfn_t pfn = page_pptonum(pp);
1586 1585
1587 1586 XPV_DISALLOW_MIGRATE();
1588 1587 ASSERT(IS_PAGEALIGNED(va));
1589 1588 ASSERT(hat == kas.a_hat || va < _userlimit);
1590 - ASSERT(hat == kas.a_hat ||
1591 - AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1589 + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1592 1590 ASSERT((flags & supported_memload_flags) == flags);
1593 1591
1594 1592 ASSERT(!IN_VA_HOLE(va));
1595 1593 ASSERT(!PP_ISFREE(pp));
1596 1594
1597 1595 /*
1598 1596 * kernel address special case for performance.
1599 1597 */
1600 1598 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1601 1599 ASSERT(hat == kas.a_hat);
1602 1600 hat_kmap_load(addr, pp, attr, flags);
1603 1601 XPV_ALLOW_MIGRATE();
1604 1602 return;
1605 1603 }
1606 1604
1607 1605 /*
1608 1606 * This is used for memory with normal caching enabled, so
1609 1607 * always set HAT_STORECACHING_OK.
1610 1608 */
1611 1609 attr |= HAT_STORECACHING_OK;
1612 1610 if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1613 1611 panic("unexpected hati_load_common() failure");
1614 1612 XPV_ALLOW_MIGRATE();
1615 1613 }
1616 1614
1617 1615 /* ARGSUSED */
1618 1616 void
1619 1617 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1620 1618 uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
1621 1619 {
1622 1620 hat_memload(hat, addr, pp, attr, flags);
1623 1621 }
1624 1622
1625 1623 /*
1626 1624 * Load the given array of page structs using large pages when possible
1627 1625 */
1628 1626 void
1629 1627 hat_memload_array(
1630 1628 hat_t *hat,
1631 1629 caddr_t addr,
1632 1630 size_t len,
1633 1631 page_t **pages,
1634 1632 uint_t attr,
1635 1633 uint_t flags)
1636 1634 {
1637 1635 uintptr_t va = (uintptr_t)addr;
1638 1636 uintptr_t eaddr = va + len;
1639 1637 level_t level;
1640 1638 size_t pgsize;
1641 1639 pgcnt_t pgindx = 0;
1642 1640 pfn_t pfn;
1643 1641 pgcnt_t i;
1644 1642
1645 1643 XPV_DISALLOW_MIGRATE();
1646 1644 ASSERT(IS_PAGEALIGNED(va));
1647 1645 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1648 - ASSERT(hat == kas.a_hat ||
1649 - AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1646 + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1650 1647 ASSERT((flags & supported_memload_flags) == flags);
1651 1648
1652 1649 /*
1653 1650 * memload is used for memory with full caching enabled, so
1654 1651 * set HAT_STORECACHING_OK.
1655 1652 */
1656 1653 attr |= HAT_STORECACHING_OK;
1657 1654
1658 1655 /*
1659 1656 * handle all pages using largest possible pagesize
1660 1657 */
1661 1658 while (va < eaddr) {
1662 1659 /*
1663 1660 * decide what level mapping to use (ie. pagesize)
1664 1661 */
1665 1662 pfn = page_pptonum(pages[pgindx]);
1666 1663 for (level = mmu.max_page_level; ; --level) {
1667 1664 pgsize = LEVEL_SIZE(level);
1668 1665 if (level == 0)
1669 1666 break;
1670 1667
1671 1668 if (!IS_P2ALIGNED(va, pgsize) ||
1672 1669 (eaddr - va) < pgsize ||
1673 1670 !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
1674 1671 continue;
1675 1672
1676 1673 /*
1677 1674 * To use a large mapping of this size, all the
1678 1675 * pages we are passed must be sequential subpages
1679 1676 * of the large page.
1680 1677 * hat_page_demote() can't change p_szc because
1681 1678 * all pages are locked.
1682 1679 */
1683 1680 if (pages[pgindx]->p_szc >= level) {
1684 1681 for (i = 0; i < mmu_btop(pgsize); ++i) {
1685 1682 if (pfn + i !=
1686 1683 page_pptonum(pages[pgindx + i]))
1687 1684 break;
1688 1685 ASSERT(pages[pgindx + i]->p_szc >=
1689 1686 level);
1690 1687 ASSERT(pages[pgindx] + i ==
1691 1688 pages[pgindx + i]);
1692 1689 }
1693 1690 if (i == mmu_btop(pgsize)) {
1694 1691 #ifdef DEBUG
1695 1692 if (level == 2)
1696 1693 map1gcnt++;
1697 1694 #endif
1698 1695 break;
1699 1696 }
1700 1697 }
1701 1698 }
1702 1699
1703 1700 /*
1704 1701 * Load this page mapping. If the load fails, try a smaller
1705 1702 * pagesize.
1706 1703 */
1707 1704 ASSERT(!IN_VA_HOLE(va));
1708 1705 while (hati_load_common(hat, va, pages[pgindx], attr,
1709 1706 flags, level, pfn) != 0) {
1710 1707 if (level == 0)
1711 1708 panic("unexpected hati_load_common() failure");
1712 1709 --level;
1713 1710 pgsize = LEVEL_SIZE(level);
1714 1711 }
1715 1712
1716 1713 /*
1717 1714 * move to next page
1718 1715 */
1719 1716 va += pgsize;
1720 1717 pgindx += mmu_btop(pgsize);
1721 1718 }
1722 1719 XPV_ALLOW_MIGRATE();
1723 1720 }
1724 1721
1725 1722 /* ARGSUSED */
1726 1723 void
1727 1724 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1728 1725 struct page **pps, uint_t attr, uint_t flags,
1729 1726 hat_region_cookie_t rcookie)
1730 1727 {
1731 1728 hat_memload_array(hat, addr, len, pps, attr, flags);
1732 1729 }
1733 1730
1734 1731 /*
1735 1732 * void hat_devload(hat, addr, len, pf, attr, flags)
1736 1733 * load/lock the given page frame number
1737 1734 *
1738 1735 * Advisory ordering attributes. Apply only to device mappings.
1739 1736 *
1740 1737 * HAT_STRICTORDER: the CPU must issue the references in order, as the
1741 1738 * programmer specified. This is the default.
1742 1739 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1743 1740 * of reordering; store or load with store or load).
1744 1741 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1745 1742 * to consecutive locations (for example, turn two consecutive byte
1746 1743 * stores into one halfword store), and it may batch individual loads
1747 1744 * (for example, turn two consecutive byte loads into one halfword load).
1748 1745 * This also implies re-ordering.
1749 1746 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1750 1747 * until another store occurs. The default is to fetch new data
1751 1748 * on every load. This also implies merging.
1752 1749 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1753 1750 * the device (perhaps with other data) at a later time. The default is
1754 1751 * to push the data right away. This also implies load caching.
1755 1752 *
1756 1753 * Equivalent of hat_memload(), but can be used for device memory where
1757 1754 * there are no page_t's and we support additional flags (write merging, etc).
1758 1755 * Note that we can have large page mappings with this interface.
1759 1756 */
1760 1757 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1761 1758 HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1762 1759 HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
1763 1760
1764 1761 void
1765 1762 hat_devload(
1766 1763 hat_t *hat,
1767 1764 caddr_t addr,
1768 1765 size_t len,
1769 1766 pfn_t pfn,
1770 1767 uint_t attr,
1771 1768 int flags)
1772 1769 {
1773 1770 uintptr_t va = ALIGN2PAGE(addr);
1774 1771 uintptr_t eva = va + len;
1775 1772 level_t level;
1776 1773 size_t pgsize;
1777 1774 page_t *pp;
1778 1775 int f; /* per PTE copy of flags - maybe modified */
1779 1776 uint_t a; /* per PTE copy of attr */
1780 1777
1781 1778 XPV_DISALLOW_MIGRATE();
1782 1779 ASSERT(IS_PAGEALIGNED(va));
1783 1780 ASSERT(hat == kas.a_hat || eva <= _userlimit);
1784 - ASSERT(hat == kas.a_hat ||
1785 - AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1781 + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1786 1782 ASSERT((flags & supported_devload_flags) == flags);
1787 1783
1788 1784 /*
1789 1785 * handle all pages
1790 1786 */
1791 1787 while (va < eva) {
1792 1788
1793 1789 /*
1794 1790 * decide what level mapping to use (ie. pagesize)
1795 1791 */
1796 1792 for (level = mmu.max_page_level; ; --level) {
1797 1793 pgsize = LEVEL_SIZE(level);
1798 1794 if (level == 0)
1799 1795 break;
1800 1796 if (IS_P2ALIGNED(va, pgsize) &&
1801 1797 (eva - va) >= pgsize &&
1802 1798 IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
1803 1799 #ifdef DEBUG
1804 1800 if (level == 2)
1805 1801 map1gcnt++;
1806 1802 #endif
1807 1803 break;
1808 1804 }
1809 1805 }
1810 1806
1811 1807 /*
1812 1808 * If this is just memory then allow caching (this happens
1813 1809 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1814 1810 * to override that. If we don't have a page_t then make sure
1815 1811 * NOCONSIST is set.
1816 1812 */
1817 1813 a = attr;
1818 1814 f = flags;
1819 1815 if (!pf_is_memory(pfn))
1820 1816 f |= HAT_LOAD_NOCONSIST;
1821 1817 else if (!(a & HAT_PLAT_NOCACHE))
1822 1818 a |= HAT_STORECACHING_OK;
1823 1819
1824 1820 if (f & HAT_LOAD_NOCONSIST)
1825 1821 pp = NULL;
1826 1822 else
1827 1823 pp = page_numtopp_nolock(pfn);
1828 1824
1829 1825 /*
1830 1826 * Check to make sure we are really trying to map a valid
1831 1827 * memory page. The caller wishing to intentionally map
1832 1828 * free memory pages will have passed the HAT_LOAD_NOCONSIST
1833 1829 * flag, then pp will be NULL.
1834 1830 	 * flag, in which case pp will be NULL.
1835 1831 if (pp != NULL) {
1836 1832 if (PP_ISFREE(pp)) {
1837 1833 panic("hat_devload: loading "
1838 1834 "a mapping to free page %p", (void *)pp);
1839 1835 }
1840 1836
1841 1837 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1842 1838 panic("hat_devload: loading a mapping "
1843 1839 "to an unlocked page %p",
1844 1840 (void *)pp);
1845 1841 }
1846 1842 }
1847 1843
1848 1844 /*
1849 1845 * load this page mapping
1850 1846 */
1851 1847 ASSERT(!IN_VA_HOLE(va));
1852 1848 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
1853 1849 if (level == 0)
1854 1850 panic("unexpected hati_load_common() failure");
1855 1851 --level;
1856 1852 pgsize = LEVEL_SIZE(level);
1857 1853 }
1858 1854
1859 1855 /*
1860 1856 * move to next page
1861 1857 */
1862 1858 va += pgsize;
1863 1859 pfn += mmu_btop(pgsize);
1864 1860 }
1865 1861 XPV_ALLOW_MIGRATE();
1866 1862 }
1867 1863
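A hedged usage sketch for the interface above (illustrative only, not part of the webrev; kva and dev_pfn are hypothetical values assumed to come from the calling driver): a device mapping with write-merging allowed and the translation locked could look like the call below, with the ordering attribute carried in attr, as hat_memload_array() above does when it ORs HAT_STORECACHING_OK into attr.

	/*
	 * Illustrative only: map one page of device memory, locked,
	 * with write-merging permitted.  dev_pfn and kva are assumed
	 * to be supplied by the driver.
	 */
	hat_devload(kas.a_hat, kva, MMU_PAGESIZE, dev_pfn,
	    PROT_READ | PROT_WRITE | HAT_MERGING_OK,
	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);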
1868 1864 /*
1869 1865 * void hat_unlock(hat, addr, len)
1870 1866 * unlock the mappings to a given range of addresses
1871 1867 *
1872 1868 * Locks are tracked by ht_lock_cnt in the htable.
1873 1869 */
1874 1870 void
1875 1871 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1876 1872 {
1877 1873 uintptr_t vaddr = (uintptr_t)addr;
1878 1874 uintptr_t eaddr = vaddr + len;
1879 1875 htable_t *ht = NULL;
1880 1876
1881 1877 /*
1882 1878 * kernel entries are always locked, we don't track lock counts
1883 1879 */
1884 1880 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1885 1881 ASSERT(IS_PAGEALIGNED(vaddr));
1886 1882 ASSERT(IS_PAGEALIGNED(eaddr));
1887 1883 if (hat == kas.a_hat)
1888 1884 return;
1889 1885 if (eaddr > _userlimit)
1890 1886 panic("hat_unlock() address out of range - above _userlimit");
1891 1887
1892 1888 XPV_DISALLOW_MIGRATE();
1893 - ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1889 + ASSERT(AS_LOCK_HELD(hat->hat_as));
1894 1890 while (vaddr < eaddr) {
1895 1891 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1896 1892 if (ht == NULL)
1897 1893 break;
1898 1894
1899 1895 ASSERT(!IN_VA_HOLE(vaddr));
1900 1896
1901 1897 if (ht->ht_lock_cnt < 1)
1902 1898 panic("hat_unlock(): lock_cnt < 1, "
1903 1899 "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
1904 1900 HTABLE_LOCK_DEC(ht);
1905 1901
1906 1902 vaddr += LEVEL_SIZE(ht->ht_level);
1907 1903 }
1908 1904 if (ht)
1909 1905 htable_release(ht);
1910 1906 XPV_ALLOW_MIGRATE();
1911 1907 }
1912 1908
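As a brief, hedged illustration of the lock counting described above (not part of the webrev; hat, addr, and pp are assumed to come from the caller): a translation loaded with HAT_LOAD_LOCK bumps ht_lock_cnt in its htable, and a later hat_unlock() over the same range drops it again.

	/* illustrative only: lock a single user page, then release the lock */
	hat_memload(hat, addr, pp, PROT_READ | PROT_USER, HAT_LOAD_LOCK);
	/* ... use the mapping ... */
	hat_unlock(hat, addr, MMU_PAGESIZE);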
1913 1909 /* ARGSUSED */
1914 1910 void
1915 1911 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
1916 1912 hat_region_cookie_t rcookie)
1917 1913 {
1918 1914 panic("No shared region support on x86");
1919 1915 }
1920 1916
1921 1917 #if !defined(__xpv)
1922 1918 /*
1923 1919 * Cross call service routine to demap a virtual page on
1924 1920 * the current CPU or flush all mappings in TLB.
1925 1921 */
1926 1922 /*ARGSUSED*/
1927 1923 static int
1928 1924 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1929 1925 {
1930 1926 hat_t *hat = (hat_t *)a1;
1931 1927 caddr_t addr = (caddr_t)a2;
1932 1928 size_t len = (size_t)a3;
1933 1929
1934 1930 /*
1935 1931 * If the target hat isn't the kernel and this CPU isn't operating
1936 1932 * in the target hat, we can ignore the cross call.
1937 1933 */
1938 1934 if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1939 1935 return (0);
1940 1936
1941 1937 /*
1942 1938 * For a normal address, we flush a range of contiguous mappings
1943 1939 */
1944 1940 if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1945 1941 for (size_t i = 0; i < len; i += MMU_PAGESIZE)
1946 1942 mmu_tlbflush_entry(addr + i);
1947 1943 return (0);
1948 1944 }
1949 1945
1950 1946 /*
1951 1947 * Otherwise we reload cr3 to effect a complete TLB flush.
1952 1948 *
1953 1949 * A reload of cr3 on a VLP process also means we must also recopy in
1954 1950 * the pte values from the struct hat
1955 1951 */
1956 1952 if (hat->hat_flags & HAT_VLP) {
1957 1953 #if defined(__amd64)
1958 1954 x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1959 1955
1960 1956 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1961 1957 #elif defined(__i386)
1962 1958 reload_pae32(hat, CPU);
1963 1959 #endif
1964 1960 }
1965 1961 reload_cr3();
1966 1962 return (0);
1967 1963 }
1968 1964
1969 1965 /*
1970 1966 * Flush all TLB entries, including global (ie. kernel) ones.
1971 1967 */
1972 1968 static void
1973 1969 flush_all_tlb_entries(void)
1974 1970 {
1975 1971 ulong_t cr4 = getcr4();
1976 1972
1977 1973 if (cr4 & CR4_PGE) {
1978 1974 setcr4(cr4 & ~(ulong_t)CR4_PGE);
1979 1975 setcr4(cr4);
1980 1976
1981 1977 /*
1982 1978 * 32 bit PAE also needs to always reload_cr3()
1983 1979 */
1984 1980 if (mmu.max_level == 2)
1985 1981 reload_cr3();
1986 1982 } else {
1987 1983 reload_cr3();
1988 1984 }
1989 1985 }
1990 1986
1991 1987 #define TLB_CPU_HALTED (01ul)
1992 1988 #define TLB_INVAL_ALL (02ul)
1993 1989 #define CAS_TLB_INFO(cpu, old, new) \
1994 1990 atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
1995 1991
1996 1992 /*
1997 1993 * Record that a CPU is going idle
1998 1994 */
1999 1995 void
2000 1996 tlb_going_idle(void)
2001 1997 {
2002 1998 atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
2003 1999 }
2004 2000
2005 2001 /*
2006 2002 * Service a delayed TLB flush if coming out of being idle.
2007 2003 * It will be called from cpu idle notification with interrupt disabled.
2008 2004 */
2009 2005 void
2010 2006 tlb_service(void)
2011 2007 {
2012 2008 ulong_t tlb_info;
2013 2009 ulong_t found;
2014 2010
2015 2011 /*
2016 2012 * We only have to do something if coming out of being idle.
2017 2013 */
2018 2014 tlb_info = CPU->cpu_m.mcpu_tlb_info;
2019 2015 if (tlb_info & TLB_CPU_HALTED) {
2020 2016 ASSERT(CPU->cpu_current_hat == kas.a_hat);
2021 2017
2022 2018 /*
2023 2019 * Atomic clear and fetch of old state.
2024 2020 */
2025 2021 while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
2026 2022 ASSERT(found & TLB_CPU_HALTED);
2027 2023 tlb_info = found;
2028 2024 SMT_PAUSE();
2029 2025 }
2030 2026 if (tlb_info & TLB_INVAL_ALL)
2031 2027 flush_all_tlb_entries();
2032 2028 }
2033 2029 }
2034 2030 #endif /* !__xpv */
2035 2031
2036 2032 /*
2037 2033 * Internal routine to do cross calls to invalidate a range of pages on
2038 2034 * all CPUs using a given hat.
2039 2035 */
2040 2036 void
2041 2037 hat_tlb_inval_range(hat_t *hat, uintptr_t va, size_t len)
2042 2038 {
2043 2039 extern int flushes_require_xcalls; /* from mp_startup.c */
2044 2040 cpuset_t justme;
2045 2041 cpuset_t cpus_to_shootdown;
2046 2042 #ifndef __xpv
2047 2043 cpuset_t check_cpus;
2048 2044 cpu_t *cpup;
2049 2045 int c;
2050 2046 #endif
2051 2047
2052 2048 /*
2053 2049 * If the hat is being destroyed, there are no more users, so
2054 2050 * demap need not do anything.
2055 2051 */
2056 2052 if (hat->hat_flags & HAT_FREEING)
2057 2053 return;
2058 2054
2059 2055 /*
2060 2056 * If demapping from a shared pagetable, we best demap the
2061 2057 * entire set of user TLBs, since we don't know what addresses
2062 2058 * these were shared at.
2063 2059 */
2064 2060 if (hat->hat_flags & HAT_SHARED) {
2065 2061 hat = kas.a_hat;
2066 2062 va = DEMAP_ALL_ADDR;
2067 2063 }
2068 2064
2069 2065 /*
2070 2066 * if not running with multiple CPUs, don't use cross calls
2071 2067 */
2072 2068 if (panicstr || !flushes_require_xcalls) {
2073 2069 #ifdef __xpv
2074 2070 if (va == DEMAP_ALL_ADDR) {
2075 2071 xen_flush_tlb();
2076 2072 } else {
2077 2073 for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2078 2074 xen_flush_va((caddr_t)(va + i));
2079 2075 }
2080 2076 #else
2081 2077 (void) hati_demap_func((xc_arg_t)hat,
2082 2078 (xc_arg_t)va, (xc_arg_t)len);
2083 2079 #endif
2084 2080 return;
2085 2081 }
2086 2082
2087 2083
2088 2084 /*
2089 2085 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
2090 2086 * Otherwise it's just CPUs currently executing in this hat.
2091 2087 */
2092 2088 kpreempt_disable();
2093 2089 CPUSET_ONLY(justme, CPU->cpu_id);
2094 2090 if (hat == kas.a_hat)
2095 2091 cpus_to_shootdown = khat_cpuset;
2096 2092 else
2097 2093 cpus_to_shootdown = hat->hat_cpus;
2098 2094
2099 2095 #ifndef __xpv
2100 2096 /*
2101 2097 * If any CPUs in the set are idle, just request a delayed flush
2102 2098 * and avoid waking them up.
2103 2099 */
2104 2100 check_cpus = cpus_to_shootdown;
2105 2101 for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
2106 2102 ulong_t tlb_info;
2107 2103
2108 2104 if (!CPU_IN_SET(check_cpus, c))
2109 2105 continue;
2110 2106 CPUSET_DEL(check_cpus, c);
2111 2107 cpup = cpu[c];
2112 2108 if (cpup == NULL)
2113 2109 continue;
2114 2110
2115 2111 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2116 2112 while (tlb_info == TLB_CPU_HALTED) {
2117 2113 (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
2118 2114 TLB_CPU_HALTED | TLB_INVAL_ALL);
2119 2115 SMT_PAUSE();
2120 2116 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2121 2117 }
2122 2118 if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
2123 2119 HATSTAT_INC(hs_tlb_inval_delayed);
2124 2120 CPUSET_DEL(cpus_to_shootdown, c);
2125 2121 }
2126 2122 }
2127 2123 #endif
2128 2124
2129 2125 if (CPUSET_ISNULL(cpus_to_shootdown) ||
2130 2126 CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
2131 2127
2132 2128 #ifdef __xpv
2133 2129 if (va == DEMAP_ALL_ADDR) {
2134 2130 xen_flush_tlb();
2135 2131 } else {
2136 2132 for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2137 2133 xen_flush_va((caddr_t)(va + i));
2138 2134 }
2139 2135 #else
2140 2136 (void) hati_demap_func((xc_arg_t)hat,
2141 2137 (xc_arg_t)va, (xc_arg_t)len);
2142 2138 #endif
2143 2139
2144 2140 } else {
2145 2141
2146 2142 CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
2147 2143 #ifdef __xpv
2148 2144 if (va == DEMAP_ALL_ADDR) {
2149 2145 xen_gflush_tlb(cpus_to_shootdown);
2150 2146 } else {
2151 2147 for (size_t i = 0; i < len; i += MMU_PAGESIZE) {
2152 2148 xen_gflush_va((caddr_t)(va + i),
2153 2149 cpus_to_shootdown);
2154 2150 }
2155 2151 }
2156 2152 #else
2157 2153 xc_call((xc_arg_t)hat, (xc_arg_t)va, (xc_arg_t)len,
2158 2154 CPUSET2BV(cpus_to_shootdown), hati_demap_func);
2159 2155 #endif
2160 2156
2161 2157 }
2162 2158 kpreempt_enable();
2163 2159 }
2164 2160
2165 2161 void
2166 2162 hat_tlb_inval(hat_t *hat, uintptr_t va)
2167 2163 {
2168 2164 hat_tlb_inval_range(hat, va, MMU_PAGESIZE);
2169 2165 }
2170 2166
2171 2167 /*
2172 2168 * Interior routine for HAT_UNLOADs from hat_unload_callback(),
2173 2169 * hat_kmap_unload() OR from hat_steal() code. This routine doesn't
2174 2170 * handle releasing of the htables.
2175 2171 */
2176 2172 void
2177 2173 hat_pte_unmap(
2178 2174 htable_t *ht,
2179 2175 uint_t entry,
2180 2176 uint_t flags,
2181 2177 x86pte_t old_pte,
2182 2178 void *pte_ptr,
2183 2179 boolean_t tlb)
2184 2180 {
2185 2181 hat_t *hat = ht->ht_hat;
2186 2182 hment_t *hm = NULL;
2187 2183 page_t *pp = NULL;
2188 2184 level_t l = ht->ht_level;
2189 2185 pfn_t pfn;
2190 2186
2191 2187 /*
2192 2188 * We always track the locking counts, even if nothing is unmapped
2193 2189 */
2194 2190 if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2195 2191 ASSERT(ht->ht_lock_cnt > 0);
2196 2192 HTABLE_LOCK_DEC(ht);
2197 2193 }
2198 2194
2199 2195 /*
2200 2196 * Figure out which page's mapping list lock to acquire using the PFN
2201 2197 * passed in "old" PTE. We then attempt to invalidate the PTE.
2202 2198 * If another thread, probably a hat_pageunload, has asynchronously
2203 2199 * unmapped/remapped this address we'll loop here.
2204 2200 */
2205 2201 ASSERT(ht->ht_busy > 0);
2206 2202 while (PTE_ISVALID(old_pte)) {
2207 2203 pfn = PTE2PFN(old_pte, l);
2208 2204 if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
2209 2205 pp = NULL;
2210 2206 } else {
2211 2207 #ifdef __xpv
2212 2208 if (pfn == PFN_INVALID)
2213 2209 panic("Invalid PFN, but not PT_NOCONSIST");
2214 2210 #endif
2215 2211 pp = page_numtopp_nolock(pfn);
2216 2212 if (pp == NULL) {
2217 2213 panic("no page_t, not NOCONSIST: old_pte="
2218 2214 FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2219 2215 old_pte, (uintptr_t)ht, entry,
2220 2216 (uintptr_t)pte_ptr);
2221 2217 }
2222 2218 x86_hm_enter(pp);
2223 2219 }
2224 2220
2225 2221 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr, tlb);
2226 2222
2227 2223 /*
2228 2224 * If the page hadn't changed we've unmapped it and can proceed
2229 2225 */
2230 2226 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2231 2227 break;
2232 2228
2233 2229 /*
2234 2230 * Otherwise, we'll have to retry with the current old_pte.
2235 2231 * Drop the hment lock, since the pfn may have changed.
2236 2232 */
2237 2233 if (pp != NULL) {
2238 2234 x86_hm_exit(pp);
2239 2235 pp = NULL;
2240 2236 } else {
2241 2237 ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2242 2238 }
2243 2239 }
2244 2240
2245 2241 /*
2246 2242 * If the old mapping wasn't valid, there's nothing more to do
2247 2243 */
2248 2244 if (!PTE_ISVALID(old_pte)) {
2249 2245 if (pp != NULL)
2250 2246 x86_hm_exit(pp);
2251 2247 return;
2252 2248 }
2253 2249
2254 2250 /*
2255 2251 * Take care of syncing any MOD/REF bits and removing the hment.
2256 2252 */
2257 2253 if (pp != NULL) {
2258 2254 if (!(flags & HAT_UNLOAD_NOSYNC))
2259 2255 hati_sync_pte_to_page(pp, old_pte, l);
2260 2256 hm = hment_remove(pp, ht, entry);
2261 2257 x86_hm_exit(pp);
2262 2258 if (hm != NULL)
2263 2259 hment_free(hm);
2264 2260 }
2265 2261
2266 2262 /*
2267 2263 	 * Handle bookkeeping in the htable and hat
2268 2264 */
2269 2265 ASSERT(ht->ht_valid_cnt > 0);
2270 2266 HTABLE_DEC(ht->ht_valid_cnt);
2271 2267 PGCNT_DEC(hat, l);
2272 2268 }
2273 2269
2274 2270 /*
2275 2271 * very cheap unload implementation to special case some kernel addresses
2276 2272 */
2277 2273 static void
2278 2274 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2279 2275 {
2280 2276 uintptr_t va = (uintptr_t)addr;
2281 2277 uintptr_t eva = va + len;
2282 2278 pgcnt_t pg_index;
2283 2279 htable_t *ht;
2284 2280 uint_t entry;
2285 2281 x86pte_t *pte_ptr;
2286 2282 x86pte_t old_pte;
2287 2283
2288 2284 for (; va < eva; va += MMU_PAGESIZE) {
2289 2285 /*
2290 2286 * Get the PTE
2291 2287 */
2292 2288 pg_index = mmu_btop(va - mmu.kmap_addr);
2293 2289 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2294 2290 old_pte = GET_PTE(pte_ptr);
2295 2291
2296 2292 /*
2297 2293 * get the htable / entry
2298 2294 */
2299 2295 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2300 2296 >> LEVEL_SHIFT(1)];
2301 2297 entry = htable_va2entry(va, ht);
2302 2298
2303 2299 /*
2304 2300 * use mostly common code to unmap it.
2305 2301 */
2306 2302 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr, B_TRUE);
2307 2303 }
2308 2304 }
2309 2305
2310 2306
2311 2307 /*
2312 2308 * unload a range of virtual address space (no callback)
2313 2309 */
2314 2310 void
2315 2311 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2316 2312 {
2317 2313 uintptr_t va = (uintptr_t)addr;
2318 2314
2319 2315 XPV_DISALLOW_MIGRATE();
2320 2316 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2321 2317
2322 2318 /*
2323 2319 * special case for performance.
2324 2320 */
2325 2321 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2326 2322 ASSERT(hat == kas.a_hat);
2327 2323 hat_kmap_unload(addr, len, flags);
2328 2324 } else {
2329 2325 hat_unload_callback(hat, addr, len, flags, NULL);
2330 2326 }
2331 2327 XPV_ALLOW_MIGRATE();
2332 2328 }
2333 2329
2334 2330 /*
2335 2331 * Do the callbacks for ranges being unloaded.
2336 2332 */
2337 2333 typedef struct range_info {
2338 2334 uintptr_t rng_va;
2339 2335 ulong_t rng_cnt;
2340 2336 level_t rng_level;
2341 2337 } range_info_t;
2342 2338
2343 2339 /*
2344 2340 * Invalidate the TLB, and perform the callback to the upper level VM system,
2345 2341 * for the specified ranges of contiguous pages.
2346 2342 */
2347 2343 static void
2348 2344 handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range)
2349 2345 {
2350 2346 while (cnt > 0) {
2351 2347 size_t len;
2352 2348
2353 2349 --cnt;
2354 2350 len = range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
2355 2351 hat_tlb_inval_range(hat, (uintptr_t)range[cnt].rng_va, len);
2356 2352
2357 2353 if (cb != NULL) {
2358 2354 cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2359 2355 cb->hcb_end_addr = cb->hcb_start_addr;
2360 2356 cb->hcb_end_addr += len;
2361 2357 cb->hcb_function(cb);
2362 2358 }
2363 2359 }
2364 2360 }
2365 2361
2366 2362 /*
2367 2363 * Unload a given range of addresses (has optional callback)
2368 2364 *
2369 2365 * Flags:
2370 2366 * define HAT_UNLOAD 0x00
2371 2367 * define HAT_UNLOAD_NOSYNC 0x02
2372 2368 * define HAT_UNLOAD_UNLOCK 0x04
2373 2369 * define HAT_UNLOAD_OTHER 0x08 - not used
2374 2370 * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD
2375 2371 */
2376 2372 #define MAX_UNLOAD_CNT (8)
2377 2373 void
2378 2374 hat_unload_callback(
2379 2375 hat_t *hat,
2380 2376 caddr_t addr,
2381 2377 size_t len,
2382 2378 uint_t flags,
2383 2379 hat_callback_t *cb)
2384 2380 {
2385 2381 uintptr_t vaddr = (uintptr_t)addr;
2386 2382 uintptr_t eaddr = vaddr + len;
2387 2383 htable_t *ht = NULL;
2388 2384 uint_t entry;
2389 2385 uintptr_t contig_va = (uintptr_t)-1L;
2390 2386 range_info_t r[MAX_UNLOAD_CNT];
2391 2387 uint_t r_cnt = 0;
2392 2388 x86pte_t old_pte;
2393 2389
2394 2390 XPV_DISALLOW_MIGRATE();
2395 2391 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2396 2392 ASSERT(IS_PAGEALIGNED(vaddr));
2397 2393 ASSERT(IS_PAGEALIGNED(eaddr));
2398 2394
2399 2395 /*
2400 2396 * Special case a single page being unloaded for speed. This happens
2401 2397 * quite frequently, COW faults after a fork() for example.
2402 2398 */
2403 2399 if (cb == NULL && len == MMU_PAGESIZE) {
2404 2400 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2405 2401 if (ht != NULL) {
2406 2402 if (PTE_ISVALID(old_pte)) {
2407 2403 hat_pte_unmap(ht, entry, flags, old_pte,
2408 2404 NULL, B_TRUE);
2409 2405 }
2410 2406 htable_release(ht);
2411 2407 }
2412 2408 XPV_ALLOW_MIGRATE();
2413 2409 return;
2414 2410 }
2415 2411
2416 2412 while (vaddr < eaddr) {
2417 2413 old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2418 2414 if (ht == NULL)
2419 2415 break;
2420 2416
2421 2417 ASSERT(!IN_VA_HOLE(vaddr));
2422 2418
2423 2419 if (vaddr < (uintptr_t)addr)
2424 2420 panic("hat_unload_callback(): unmap inside large page");
2425 2421
2426 2422 /*
2427 2423 * We'll do the call backs for contiguous ranges
2428 2424 */
2429 2425 if (vaddr != contig_va ||
2430 2426 (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2431 2427 if (r_cnt == MAX_UNLOAD_CNT) {
2432 2428 handle_ranges(hat, cb, r_cnt, r);
2433 2429 r_cnt = 0;
2434 2430 }
2435 2431 r[r_cnt].rng_va = vaddr;
2436 2432 r[r_cnt].rng_cnt = 0;
2437 2433 r[r_cnt].rng_level = ht->ht_level;
2438 2434 ++r_cnt;
2439 2435 }
2440 2436
2441 2437 /*
2442 2438 * Unload one mapping (for a single page) from the page tables.
2443 2439 * Note that we do not remove the mapping from the TLB yet,
2444 2440 * as indicated by the tlb=FALSE argument to hat_pte_unmap().
2445 2441 * handle_ranges() will clear the TLB entries with one call to
2446 2442 * hat_tlb_inval_range() per contiguous range. This is
2447 2443 * safe because the page can not be reused until the
2448 2444 * callback is made (or we return).
2449 2445 */
2450 2446 entry = htable_va2entry(vaddr, ht);
2451 2447 hat_pte_unmap(ht, entry, flags, old_pte, NULL, B_FALSE);
2452 2448 ASSERT(ht->ht_level <= mmu.max_page_level);
2453 2449 vaddr += LEVEL_SIZE(ht->ht_level);
2454 2450 contig_va = vaddr;
2455 2451 ++r[r_cnt - 1].rng_cnt;
2456 2452 }
2457 2453 if (ht)
2458 2454 htable_release(ht);
2459 2455
2460 2456 /*
2461 2457 * handle last range for callbacks
2462 2458 */
2463 2459 if (r_cnt > 0)
2464 2460 handle_ranges(hat, cb, r_cnt, r);
2465 2461 XPV_ALLOW_MIGRATE();
2466 2462 }
2467 2463
2468 2464 /*
2469 2465 * Invalidate a virtual address translation on a slave CPU during
2470 2466 * panic() dumps.
2471 2467 */
2472 2468 void
2473 2469 hat_flush_range(hat_t *hat, caddr_t va, size_t size)
2474 2470 {
2475 2471 ssize_t sz;
2476 2472 caddr_t endva = va + size;
2477 2473
2478 2474 while (va < endva) {
2479 2475 sz = hat_getpagesize(hat, va);
2480 2476 if (sz < 0) {
2481 2477 #ifdef __xpv
2482 2478 xen_flush_tlb();
2483 2479 #else
2484 2480 flush_all_tlb_entries();
2485 2481 #endif
2486 2482 break;
2487 2483 }
2488 2484 #ifdef __xpv
2489 2485 xen_flush_va(va);
2490 2486 #else
2491 2487 mmu_tlbflush_entry(va);
2492 2488 #endif
2493 2489 va += sz;
2494 2490 }
2495 2491 }
2496 2492
2497 2493 /*
2498 2494 * synchronize mapping with software data structures
2499 2495 *
2500 2496 * This interface is currently only used by the working set monitor
2501 2497 * driver.
2502 2498 */
2503 2499 /*ARGSUSED*/
2504 2500 void
2505 2501 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2506 2502 {
2507 2503 uintptr_t vaddr = (uintptr_t)addr;
2508 2504 uintptr_t eaddr = vaddr + len;
2509 2505 htable_t *ht = NULL;
2510 2506 uint_t entry;
2511 2507 x86pte_t pte;
2512 2508 x86pte_t save_pte;
2513 2509 x86pte_t new;
2514 2510 page_t *pp;
2515 2511
2516 2512 ASSERT(!IN_VA_HOLE(vaddr));
2517 2513 ASSERT(IS_PAGEALIGNED(vaddr));
2518 2514 ASSERT(IS_PAGEALIGNED(eaddr));
2519 2515 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2520 2516
2521 2517 XPV_DISALLOW_MIGRATE();
2522 2518 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2523 2519 try_again:
2524 2520 pte = htable_walk(hat, &ht, &vaddr, eaddr);
2525 2521 if (ht == NULL)
2526 2522 break;
2527 2523 entry = htable_va2entry(vaddr, ht);
2528 2524
2529 2525 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2530 2526 PTE_GET(pte, PT_REF | PT_MOD) == 0)
2531 2527 continue;
2532 2528
2533 2529 /*
2534 2530 * We need to acquire the mapping list lock to protect
2535 2531 * against hat_pageunload(), hat_unload(), etc.
2536 2532 */
2537 2533 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2538 2534 if (pp == NULL)
2539 2535 break;
2540 2536 x86_hm_enter(pp);
2541 2537 save_pte = pte;
2542 2538 pte = x86pte_get(ht, entry);
2543 2539 if (pte != save_pte) {
2544 2540 x86_hm_exit(pp);
2545 2541 goto try_again;
2546 2542 }
2547 2543 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2548 2544 PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2549 2545 x86_hm_exit(pp);
2550 2546 continue;
2551 2547 }
2552 2548
2553 2549 /*
2554 2550 * Need to clear ref or mod bits. We may compete with
2555 2551 * hardware updating the R/M bits and have to try again.
2556 2552 */
2557 2553 if (flags == HAT_SYNC_ZERORM) {
2558 2554 new = pte;
2559 2555 PTE_CLR(new, PT_REF | PT_MOD);
2560 2556 pte = hati_update_pte(ht, entry, pte, new);
2561 2557 if (pte != 0) {
2562 2558 x86_hm_exit(pp);
2563 2559 goto try_again;
2564 2560 }
2565 2561 } else {
2566 2562 /*
2567 2563 * sync the PTE to the page_t
2568 2564 */
2569 2565 hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2570 2566 }
2571 2567 x86_hm_exit(pp);
2572 2568 }
2573 2569 if (ht)
2574 2570 htable_release(ht);
2575 2571 XPV_ALLOW_MIGRATE();
2576 2572 }
2577 2573
2578 2574 /*
2579 2575 * void hat_map(hat, addr, len, flags)
2580 2576 */
2581 2577 /*ARGSUSED*/
2582 2578 void
2583 2579 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2584 2580 {
2585 2581 /* does nothing */
2586 2582 }
2587 2583
2588 2584 /*
2589 2585 * uint_t hat_getattr(hat, addr, *attr)
2590 2586 * returns attr for <hat,addr> in *attr. returns 0 if there was a
2591 2587 * mapping and *attr is valid, nonzero if there was no mapping and
2592 2588 * *attr is not valid.
2593 2589 */
2594 2590 uint_t
2595 2591 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2596 2592 {
2597 2593 uintptr_t vaddr = ALIGN2PAGE(addr);
2598 2594 htable_t *ht = NULL;
2599 2595 x86pte_t pte;
2600 2596
2601 2597 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2602 2598
2603 2599 if (IN_VA_HOLE(vaddr))
2604 2600 return ((uint_t)-1);
2605 2601
2606 2602 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2607 2603 if (ht == NULL)
2608 2604 return ((uint_t)-1);
2609 2605
2610 2606 if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2611 2607 htable_release(ht);
2612 2608 return ((uint_t)-1);
2613 2609 }
2614 2610
2615 2611 *attr = PROT_READ;
2616 2612 if (PTE_GET(pte, PT_WRITABLE))
2617 2613 *attr |= PROT_WRITE;
2618 2614 if (PTE_GET(pte, PT_USER))
2619 2615 *attr |= PROT_USER;
2620 2616 if (!PTE_GET(pte, mmu.pt_nx))
2621 2617 *attr |= PROT_EXEC;
2622 2618 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
2623 2619 *attr |= HAT_NOSYNC;
2624 2620 htable_release(ht);
2625 2621 return (0);
2626 2622 }
2627 2623
2628 2624 /*
2629 2625 * hat_updateattr() applies the given attribute change to an existing mapping
2630 2626 */
2631 2627 #define HAT_LOAD_ATTR 1
2632 2628 #define HAT_SET_ATTR 2
2633 2629 #define HAT_CLR_ATTR 3
2634 2630
2635 2631 static void
2636 2632 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2637 2633 {
2638 2634 uintptr_t vaddr = (uintptr_t)addr;
2639 2635 uintptr_t eaddr = (uintptr_t)addr + len;
2640 2636 htable_t *ht = NULL;
2641 2637 uint_t entry;
2642 2638 x86pte_t oldpte, newpte;
2643 2639 page_t *pp;
2644 2640
2645 2641 XPV_DISALLOW_MIGRATE();
2646 2642 ASSERT(IS_PAGEALIGNED(vaddr));
2647 2643 ASSERT(IS_PAGEALIGNED(eaddr));
2648 - ASSERT(hat == kas.a_hat ||
2649 - AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2644 + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2650 2645 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2651 2646 try_again:
2652 2647 oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2653 2648 if (ht == NULL)
2654 2649 break;
2655 2650 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2656 2651 continue;
2657 2652
2658 2653 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2659 2654 if (pp == NULL)
2660 2655 continue;
2661 2656 x86_hm_enter(pp);
2662 2657
2663 2658 newpte = oldpte;
2664 2659 /*
2665 2660 * We found a page table entry in the desired range,
2666 2661 * figure out the new attributes.
2667 2662 */
2668 2663 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2669 2664 if ((attr & PROT_WRITE) &&
2670 2665 !PTE_GET(oldpte, PT_WRITABLE))
2671 2666 newpte |= PT_WRITABLE;
2672 2667
2673 2668 if ((attr & HAT_NOSYNC) &&
2674 2669 PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
2675 2670 newpte |= PT_NOSYNC;
2676 2671
2677 2672 if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2678 2673 newpte &= ~mmu.pt_nx;
2679 2674 }
2680 2675
2681 2676 if (what == HAT_LOAD_ATTR) {
2682 2677 if (!(attr & PROT_WRITE) &&
2683 2678 PTE_GET(oldpte, PT_WRITABLE))
2684 2679 newpte &= ~PT_WRITABLE;
2685 2680
2686 2681 if (!(attr & HAT_NOSYNC) &&
2687 2682 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2688 2683 newpte &= ~PT_SOFTWARE;
2689 2684
2690 2685 if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2691 2686 newpte |= mmu.pt_nx;
2692 2687 }
2693 2688
2694 2689 if (what == HAT_CLR_ATTR) {
2695 2690 if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2696 2691 newpte &= ~PT_WRITABLE;
2697 2692
2698 2693 if ((attr & HAT_NOSYNC) &&
2699 2694 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2700 2695 newpte &= ~PT_SOFTWARE;
2701 2696
2702 2697 if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2703 2698 newpte |= mmu.pt_nx;
2704 2699 }
2705 2700
2706 2701 /*
2707 2702 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
2708 2703 * x86pte_set() depends on this.
2709 2704 */
2710 2705 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
2711 2706 newpte |= PT_REF | PT_MOD;
2712 2707
2713 2708 /*
2714 2709 * what about PROT_READ or others? this code only handles:
2715 2710 * EXEC, WRITE, NOSYNC
2716 2711 */
2717 2712
2718 2713 /*
2719 2714 * If new PTE really changed, update the table.
2720 2715 */
2721 2716 if (newpte != oldpte) {
2722 2717 entry = htable_va2entry(vaddr, ht);
2723 2718 oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2724 2719 if (oldpte != 0) {
2725 2720 x86_hm_exit(pp);
2726 2721 goto try_again;
2727 2722 }
2728 2723 }
2729 2724 x86_hm_exit(pp);
2730 2725 }
2731 2726 if (ht)
2732 2727 htable_release(ht);
2733 2728 XPV_ALLOW_MIGRATE();
2734 2729 }
2735 2730
2736 2731 /*
2737 2732 * Various wrappers for hat_updateattr()
2738 2733 */
2739 2734 void
2740 2735 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2741 2736 {
2742 2737 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2743 2738 hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2744 2739 }
2745 2740
2746 2741 void
2747 2742 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2748 2743 {
2749 2744 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2750 2745 hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2751 2746 }
2752 2747
2753 2748 void
2754 2749 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2755 2750 {
2756 2751 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2757 2752 hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2758 2753 }
2759 2754
2760 2755 void
2761 2756 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2762 2757 {
2763 2758 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2764 2759 hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2765 2760 }
2766 2761
2767 2762 /*
2768 2763 * size_t hat_getpagesize(hat, addr)
2769 2764  * returns pagesize in bytes for <hat, addr>. returns -1 if there is
2770 2765 * no mapping. This is an advisory call.
2771 2766 */
2772 2767 ssize_t
2773 2768 hat_getpagesize(hat_t *hat, caddr_t addr)
2774 2769 {
2775 2770 uintptr_t vaddr = ALIGN2PAGE(addr);
2776 2771 htable_t *ht;
2777 2772 size_t pagesize;
2778 2773
2779 2774 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2780 2775 if (IN_VA_HOLE(vaddr))
2781 2776 return (-1);
2782 2777 ht = htable_getpage(hat, vaddr, NULL);
2783 2778 if (ht == NULL)
2784 2779 return (-1);
2785 2780 pagesize = LEVEL_SIZE(ht->ht_level);
2786 2781 htable_release(ht);
2787 2782 return (pagesize);
2788 2783 }
2789 2784
2790 2785
2791 2786
2792 2787 /*
2793 2788 * pfn_t hat_getpfnum(hat, addr)
2794 2789 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2795 2790 */
2796 2791 pfn_t
2797 2792 hat_getpfnum(hat_t *hat, caddr_t addr)
2798 2793 {
2799 2794 uintptr_t vaddr = ALIGN2PAGE(addr);
2800 2795 htable_t *ht;
2801 2796 uint_t entry;
2802 2797 pfn_t pfn = PFN_INVALID;
2803 2798
2804 2799 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2805 2800 if (khat_running == 0)
2806 2801 return (PFN_INVALID);
2807 2802
2808 2803 if (IN_VA_HOLE(vaddr))
2809 2804 return (PFN_INVALID);
2810 2805
2811 2806 XPV_DISALLOW_MIGRATE();
2812 2807 /*
2813 2808 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2814 2809 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2815 2810 * this up.
2816 2811 */
2817 2812 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2818 2813 x86pte_t pte;
2819 2814 pgcnt_t pg_index;
2820 2815
2821 2816 pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2822 2817 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2823 2818 if (PTE_ISVALID(pte))
2824 2819 /*LINTED [use of constant 0 causes a lint warning] */
2825 2820 pfn = PTE2PFN(pte, 0);
2826 2821 XPV_ALLOW_MIGRATE();
2827 2822 return (pfn);
2828 2823 }
2829 2824
2830 2825 ht = htable_getpage(hat, vaddr, &entry);
2831 2826 if (ht == NULL) {
2832 2827 XPV_ALLOW_MIGRATE();
2833 2828 return (PFN_INVALID);
2834 2829 }
2835 2830 ASSERT(vaddr >= ht->ht_vaddr);
2836 2831 ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2837 2832 pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2838 2833 if (ht->ht_level > 0)
2839 2834 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2840 2835 htable_release(ht);
2841 2836 XPV_ALLOW_MIGRATE();
2842 2837 return (pfn);
2843 2838 }
2844 2839
2845 2840 /*
2846 2841 * int hat_probe(hat, addr)
2847 2842 * return 0 if no valid mapping is present. Faster version
2848 2843 * of hat_getattr in certain architectures.
2849 2844 */
2850 2845 int
2851 2846 hat_probe(hat_t *hat, caddr_t addr)
2852 2847 {
2853 2848 uintptr_t vaddr = ALIGN2PAGE(addr);
2854 2849 uint_t entry;
2855 2850 htable_t *ht;
2856 2851 pgcnt_t pg_off;
2857 2852
2858 2853 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2859 - ASSERT(hat == kas.a_hat ||
2860 - AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2854 + ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2861 2855 if (IN_VA_HOLE(vaddr))
2862 2856 return (0);
2863 2857
2864 2858 /*
2865 2859 * Most common use of hat_probe is from segmap. We special case it
2866 2860 * for performance.
2867 2861 */
2868 2862 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2869 2863 pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2870 2864 if (mmu.pae_hat)
2871 2865 return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2872 2866 else
2873 2867 return (PTE_ISVALID(
2874 2868 ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2875 2869 }
2876 2870
2877 2871 ht = htable_getpage(hat, vaddr, &entry);
2878 2872 htable_release(ht);
2879 2873 return (ht != NULL);
2880 2874 }
2881 2875
2882 2876 /*
2883 2877 * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
2884 2878 */
2885 2879 static int
2886 2880 is_it_dism(hat_t *hat, caddr_t va)
2887 2881 {
2888 2882 struct seg *seg;
2889 2883 struct shm_data *shmd;
2890 2884 struct spt_data *sptd;
2891 2885
2892 2886 seg = as_findseg(hat->hat_as, va, 0);
2893 2887 ASSERT(seg != NULL);
2894 2888 ASSERT(seg->s_base <= va);
2895 2889 shmd = (struct shm_data *)seg->s_data;
2896 2890 ASSERT(shmd != NULL);
2897 2891 sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2898 2892 ASSERT(sptd != NULL);
2899 2893 if (sptd->spt_flags & SHM_PAGEABLE)
2900 2894 return (1);
2901 2895 return (0);
2902 2896 }
2903 2897
2904 2898 /*
2905 2899 * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
2906 2900 * except that we use the ism_hat's existing mappings to determine the pages
2907 2901 * and protections to use for this hat. If we find a full properly aligned
2908 2902 * and sized pagetable, we will attempt to share the pagetable itself.
2909 2903 */
2910 2904 /*ARGSUSED*/
2911 2905 int
2912 2906 hat_share(
2913 2907 hat_t *hat,
2914 2908 caddr_t addr,
2915 2909 hat_t *ism_hat,
2916 2910 caddr_t src_addr,
2917 2911 size_t len, /* almost useless value, see below.. */
2918 2912 uint_t ismszc)
2919 2913 {
2920 2914 uintptr_t vaddr_start = (uintptr_t)addr;
2921 2915 uintptr_t vaddr;
2922 2916 uintptr_t eaddr = vaddr_start + len;
2923 2917 uintptr_t ism_addr_start = (uintptr_t)src_addr;
2924 2918 uintptr_t ism_addr = ism_addr_start;
2925 2919 uintptr_t e_ism_addr = ism_addr + len;
2926 2920 htable_t *ism_ht = NULL;
2927 2921 htable_t *ht;
2928 2922 x86pte_t pte;
2929 2923 page_t *pp;
2930 2924 pfn_t pfn;
2931 2925 level_t l;
2932 2926 pgcnt_t pgcnt;
2933 2927 uint_t prot;
2934 2928 int is_dism;
2935 2929 int flags;
2936 2930
2937 2931 /*
2938 2932 * We might be asked to share an empty DISM hat by as_dup()
2939 2933 */
2940 2934 ASSERT(hat != kas.a_hat);
2941 2935 ASSERT(eaddr <= _userlimit);
2942 2936 if (!(ism_hat->hat_flags & HAT_SHARED)) {
2943 2937 ASSERT(hat_get_mapped_size(ism_hat) == 0);
2944 2938 return (0);
2945 2939 }
2946 2940 XPV_DISALLOW_MIGRATE();
2947 2941
2948 2942 /*
2949 2943 * The SPT segment driver often passes us a size larger than there are
2950 2944 * valid mappings. That's because it rounds the segment size up to a
2951 2945 * large pagesize, even if the actual memory mapped by ism_hat is less.
2952 2946 */
2953 2947 ASSERT(IS_PAGEALIGNED(vaddr_start));
2954 2948 ASSERT(IS_PAGEALIGNED(ism_addr_start));
2955 2949 ASSERT(ism_hat->hat_flags & HAT_SHARED);
2956 2950 is_dism = is_it_dism(hat, addr);
2957 2951 while (ism_addr < e_ism_addr) {
2958 2952 /*
2959 2953 * use htable_walk to get the next valid ISM mapping
2960 2954 */
2961 2955 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2962 2956 if (ism_ht == NULL)
2963 2957 break;
2964 2958
2965 2959 /*
2966 2960 * First check to see if we already share the page table.
2967 2961 */
2968 2962 l = ism_ht->ht_level;
2969 2963 vaddr = vaddr_start + (ism_addr - ism_addr_start);
2970 2964 ht = htable_lookup(hat, vaddr, l);
2971 2965 if (ht != NULL) {
2972 2966 if (ht->ht_flags & HTABLE_SHARED_PFN)
2973 2967 goto shared;
2974 2968 htable_release(ht);
2975 2969 goto not_shared;
2976 2970 }
2977 2971
2978 2972 /*
2979 2973 * Can't ever share top table.
2980 2974 */
2981 2975 if (l == mmu.max_level)
2982 2976 goto not_shared;
2983 2977
2984 2978 /*
2985 2979 * Avoid level mismatches later due to DISM faults.
2986 2980 */
2987 2981 if (is_dism && l > 0)
2988 2982 goto not_shared;
2989 2983
2990 2984 /*
2991 2985 * addresses and lengths must align
2992 2986 * table must be fully populated
2993 2987 * no lower level page tables
2994 2988 */
2995 2989 if (ism_addr != ism_ht->ht_vaddr ||
2996 2990 (vaddr & LEVEL_OFFSET(l + 1)) != 0)
2997 2991 goto not_shared;
2998 2992
2999 2993 /*
3000 2994 * The range of address space must cover a full table.
3001 2995 */
3002 2996 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
3003 2997 goto not_shared;
3004 2998
3005 2999 /*
3006 3000 * All entries in the ISM page table must be leaf PTEs.
3007 3001 */
3008 3002 if (l > 0) {
3009 3003 int e;
3010 3004
3011 3005 /*
3012 3006 * We know the 0th is from htable_walk() above.
3013 3007 */
3014 3008 for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
3015 3009 x86pte_t pte;
3016 3010 pte = x86pte_get(ism_ht, e);
3017 3011 if (!PTE_ISPAGE(pte, l))
3018 3012 goto not_shared;
3019 3013 }
3020 3014 }
3021 3015
3022 3016 /*
3023 3017 * share the page table
3024 3018 */
3025 3019 ht = htable_create(hat, vaddr, l, ism_ht);
3026 3020 shared:
3027 3021 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
3028 3022 ASSERT(ht->ht_shares == ism_ht);
3029 3023 hat->hat_ism_pgcnt +=
3030 3024 (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
3031 3025 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3032 3026 ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
3033 3027 htable_release(ht);
3034 3028 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
3035 3029 htable_release(ism_ht);
3036 3030 ism_ht = NULL;
3037 3031 continue;
3038 3032
3039 3033 not_shared:
3040 3034 /*
3041 3035 * Unable to share the page table. Instead we will
3042 3036 * create new mappings from the values in the ISM mappings.
3043 3037 * Figure out what level size mappings to use;
3044 3038 */
3045 3039 for (l = ism_ht->ht_level; l > 0; --l) {
3046 3040 if (LEVEL_SIZE(l) <= eaddr - vaddr &&
3047 3041 (vaddr & LEVEL_OFFSET(l)) == 0)
3048 3042 break;
3049 3043 }
3050 3044
3051 3045 /*
3052 3046 * The ISM mapping might be larger than the share area,
3053 3047 * be careful to truncate it if needed.
3054 3048 */
3055 3049 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
3056 3050 pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
3057 3051 } else {
3058 3052 pgcnt = mmu_btop(eaddr - vaddr);
3059 3053 l = 0;
3060 3054 }
3061 3055
3062 3056 pfn = PTE2PFN(pte, ism_ht->ht_level);
3063 3057 ASSERT(pfn != PFN_INVALID);
3064 3058 while (pgcnt > 0) {
3065 3059 /*
3066 3060 * Make a new pte for the PFN for this level.
3067 3061 * Copy protections for the pte from the ISM pte.
3068 3062 */
3069 3063 pp = page_numtopp_nolock(pfn);
3070 3064 ASSERT(pp != NULL);
3071 3065
3072 3066 prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
3073 3067 if (PTE_GET(pte, PT_WRITABLE))
3074 3068 prot |= PROT_WRITE;
3075 3069 if (!PTE_GET(pte, PT_NX))
3076 3070 prot |= PROT_EXEC;
3077 3071
3078 3072 flags = HAT_LOAD;
3079 3073 if (!is_dism)
3080 3074 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
3081 3075 while (hati_load_common(hat, vaddr, pp, prot, flags,
3082 3076 l, pfn) != 0) {
3083 3077 if (l == 0)
3084 3078 panic("hati_load_common() failure");
3085 3079 --l;
3086 3080 }
3087 3081
3088 3082 vaddr += LEVEL_SIZE(l);
3089 3083 ism_addr += LEVEL_SIZE(l);
3090 3084 pfn += mmu_btop(LEVEL_SIZE(l));
3091 3085 pgcnt -= mmu_btop(LEVEL_SIZE(l));
3092 3086 }
3093 3087 }
3094 3088 if (ism_ht != NULL)
3095 3089 htable_release(ism_ht);
3096 3090 XPV_ALLOW_MIGRATE();
3097 3091 return (0);
3098 3092 }
3099 3093
3100 3094
3101 3095 /*
3102 3096 * hat_unshare() is similar to hat_unload_callback(), but
3103 3097 * we have to look for empty shared pagetables. Note that
3104 3098 * hat_unshare() is always invoked against an entire segment.
3105 3099 */
3106 3100 /*ARGSUSED*/
3107 3101 void
3108 3102 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3109 3103 {
3110 3104 uint64_t vaddr = (uintptr_t)addr;
3111 3105 uintptr_t eaddr = vaddr + len;
3112 3106 htable_t *ht = NULL;
3113 3107 uint_t need_demaps = 0;
3114 3108 int flags = HAT_UNLOAD_UNMAP;
3115 3109 level_t l;
3116 3110
3117 3111 ASSERT(hat != kas.a_hat);
3118 3112 ASSERT(eaddr <= _userlimit);
3119 3113 ASSERT(IS_PAGEALIGNED(vaddr));
3120 3114 ASSERT(IS_PAGEALIGNED(eaddr));
3121 3115 XPV_DISALLOW_MIGRATE();
3122 3116
3123 3117 /*
3124 3118 * First go through and remove any shared pagetables.
3125 3119 *
3126 3120 * Note that it's ok to delay the TLB shootdown till the entire range is
3127 3121 * finished, because if hat_pageunload() were to unload a shared
3128 3122 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
3129 3123 */
3130 3124 l = mmu.max_page_level;
3131 3125 if (l == mmu.max_level)
3132 3126 --l;
3133 3127 for (; l >= 0; --l) {
3134 3128 for (vaddr = (uintptr_t)addr; vaddr < eaddr;
3135 3129 vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
3136 3130 ASSERT(!IN_VA_HOLE(vaddr));
3137 3131 /*
3138 3132 * find a pagetable that maps the current address
3139 3133 */
3140 3134 ht = htable_lookup(hat, vaddr, l);
3141 3135 if (ht == NULL)
3142 3136 continue;
3143 3137 if (ht->ht_flags & HTABLE_SHARED_PFN) {
3144 3138 /*
3145 3139 * clear page count, set valid_cnt to 0,
3146 3140 * let htable_release() finish the job
3147 3141 */
3148 3142 hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3149 3143 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3150 3144 ht->ht_valid_cnt = 0;
3151 3145 need_demaps = 1;
3152 3146 }
3153 3147 htable_release(ht);
3154 3148 }
3155 3149 }
3156 3150
3157 3151 /*
3158 3152 * flush the TLBs - since we're probably dealing with MANY mappings
3159 3153 * we do just one CR3 reload.
3160 3154 */
3161 3155 if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3162 3156 hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3163 3157
3164 3158 /*
3165 3159 * Now go back and clean up any unaligned mappings that
3166 3160 * couldn't share pagetables.
3167 3161 */
3168 3162 if (!is_it_dism(hat, addr))
3169 3163 flags |= HAT_UNLOAD_UNLOCK;
3170 3164 hat_unload(hat, addr, len, flags);
3171 3165 XPV_ALLOW_MIGRATE();
3172 3166 }
3173 3167
3174 3168
3175 3169 /*
3176 3170 * hat_reserve() does nothing
3177 3171 */
3178 3172 /*ARGSUSED*/
3179 3173 void
3180 3174 hat_reserve(struct as *as, caddr_t addr, size_t len)
3181 3175 {
3182 3176 }
3183 3177
3184 3178
3185 3179 /*
3186 3180 * Called when all mappings to a page should have write permission removed.
3187 3181 * Mostly stolen from hat_pagesync()
3188 3182 */
3189 3183 static void
3190 3184 hati_page_clrwrt(struct page *pp)
3191 3185 {
3192 3186 hment_t *hm = NULL;
3193 3187 htable_t *ht;
3194 3188 uint_t entry;
3195 3189 x86pte_t old;
3196 3190 x86pte_t new;
3197 3191 uint_t pszc = 0;
3198 3192
3199 3193 XPV_DISALLOW_MIGRATE();
3200 3194 next_size:
3201 3195 /*
3202 3196 * walk thru the mapping list clearing write permission
3203 3197 */
3204 3198 x86_hm_enter(pp);
3205 3199 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3206 3200 if (ht->ht_level < pszc)
3207 3201 continue;
3208 3202 old = x86pte_get(ht, entry);
3209 3203
3210 3204 for (;;) {
3211 3205 /*
3212 3206 * Is this mapping of interest?
3213 3207 */
3214 3208 if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3215 3209 PTE_GET(old, PT_WRITABLE) == 0)
3216 3210 break;
3217 3211
3218 3212 /*
3219 3213 * Clear ref/mod writable bits. This requires cross
3220 3214 * calls to ensure any executing TLBs see cleared bits.
3221 3215 */
3222 3216 new = old;
3223 3217 PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3224 3218 old = hati_update_pte(ht, entry, old, new);
3225 3219 if (old != 0)
3226 3220 continue;
3227 3221
3228 3222 break;
3229 3223 }
3230 3224 }
3231 3225 x86_hm_exit(pp);
3232 3226 while (pszc < pp->p_szc) {
3233 3227 page_t *tpp;
3234 3228 pszc++;
3235 3229 tpp = PP_GROUPLEADER(pp, pszc);
3236 3230 if (pp != tpp) {
3237 3231 pp = tpp;
3238 3232 goto next_size;
3239 3233 }
3240 3234 }
3241 3235 XPV_ALLOW_MIGRATE();
3242 3236 }
3243 3237
3244 3238 /*
3245 3239 * void hat_page_setattr(pp, flag)
3246 3240 * void hat_page_clrattr(pp, flag)
3247 3241 * used to set/clr ref/mod bits.
3248 3242 */
3249 3243 void
3250 3244 hat_page_setattr(struct page *pp, uint_t flag)
3251 3245 {
3252 3246 vnode_t *vp = pp->p_vnode;
3253 3247 kmutex_t *vphm = NULL;
3254 3248 page_t **listp;
3255 3249 int noshuffle;
3256 3250
3257 3251 noshuffle = flag & P_NSH;
3258 3252 flag &= ~P_NSH;
3259 3253
3260 3254 if (PP_GETRM(pp, flag) == flag)
3261 3255 return;
3262 3256
3263 3257 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3264 3258 !noshuffle) {
3265 3259 vphm = page_vnode_mutex(vp);
3266 3260 mutex_enter(vphm);
3267 3261 }
3268 3262
3269 3263 PP_SETRM(pp, flag);
3270 3264
3271 3265 if (vphm != NULL) {
3272 3266
3273 3267 /*
3274 3268 * Some File Systems examine v_pages for NULL w/o
3275 3269 * grabbing the vphm mutex. Must not let it become NULL when
3276 3270 * pp is the only page on the list.
3277 3271 */
3278 3272 if (pp->p_vpnext != pp) {
3279 3273 page_vpsub(&vp->v_pages, pp);
3280 3274 if (vp->v_pages != NULL)
3281 3275 listp = &vp->v_pages->p_vpprev->p_vpnext;
3282 3276 else
3283 3277 listp = &vp->v_pages;
3284 3278 page_vpadd(listp, pp);
3285 3279 }
3286 3280 mutex_exit(vphm);
3287 3281 }
3288 3282 }
3289 3283
3290 3284 void
3291 3285 hat_page_clrattr(struct page *pp, uint_t flag)
3292 3286 {
3293 3287 vnode_t *vp = pp->p_vnode;
3294 3288 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3295 3289
3296 3290 /*
3297 3291 * Caller is expected to hold page's io lock for VMODSORT to work
3298 3292 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
3299 3293 * bit is cleared.
3300 3294 * We don't have assert to avoid tripping some existing third party
3301 3295 * code. The dirty page is moved back to top of the v_page list
3302 3296 * after IO is done in pvn_write_done().
3303 3297 */
3304 3298 PP_CLRRM(pp, flag);
3305 3299
3306 3300 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3307 3301
3308 3302 /*
3309 3303 * VMODSORT works by removing write permissions and getting
3310 3304 * a fault when a page is made dirty. At this point
3311 3305 * we need to remove write permission from all mappings
3312 3306 * to this page.
3313 3307 */
3314 3308 hati_page_clrwrt(pp);
3315 3309 }
3316 3310 }
3317 3311
3318 3312 /*
3319 3313 * If flag is specified, returns 0 if attribute is disabled
3320 3314  * and nonzero if enabled. If flag specifies multiple attributes
3321 3315 * then returns 0 if ALL attributes are disabled. This is an advisory
3322 3316 * call.
3323 3317 */
3324 3318 uint_t
3325 3319 hat_page_getattr(struct page *pp, uint_t flag)
3326 3320 {
3327 3321 return (PP_GETRM(pp, flag));
3328 3322 }
3329 3323
3330 3324
3331 3325 /*
3332 3326 * common code used by hat_pageunload() and hment_steal()
3333 3327 */
3334 3328 hment_t *
3335 3329 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3336 3330 {
3337 3331 x86pte_t old_pte;
3338 3332 pfn_t pfn = pp->p_pagenum;
3339 3333 hment_t *hm;
3340 3334
3341 3335 /*
3342 3336 * We need to acquire a hold on the htable in order to
3343 3337 * do the invalidate. We know the htable must exist, since
3344 3338 * unmap's don't release the htable until after removing any
3345 3339 * hment. Having x86_hm_enter() keeps that from proceeding.
3346 3340 */
3347 3341 htable_acquire(ht);
3348 3342
3349 3343 /*
3350 3344 * Invalidate the PTE and remove the hment.
3351 3345 */
3352 3346 old_pte = x86pte_inval(ht, entry, 0, NULL, B_TRUE);
3353 3347 if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3354 3348 panic("x86pte_inval() failure found PTE = " FMT_PTE
3355 3349 " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3356 3350 old_pte, pfn, (uintptr_t)ht, entry);
3357 3351 }
3358 3352
3359 3353 /*
3360 3354 * Clean up all the htable information for this mapping
3361 3355 */
3362 3356 ASSERT(ht->ht_valid_cnt > 0);
3363 3357 HTABLE_DEC(ht->ht_valid_cnt);
3364 3358 PGCNT_DEC(ht->ht_hat, ht->ht_level);
3365 3359
3366 3360 /*
3367 3361 * sync ref/mod bits to the page_t
3368 3362 */
3369 3363 if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3370 3364 hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3371 3365
3372 3366 /*
3373 3367 * Remove the mapping list entry for this page.
3374 3368 */
3375 3369 hm = hment_remove(pp, ht, entry);
3376 3370
3377 3371 /*
3378 3372 * drop the mapping list lock so that we might free the
3379 3373 * hment and htable.
3380 3374 */
3381 3375 x86_hm_exit(pp);
3382 3376 htable_release(ht);
3383 3377 return (hm);
3384 3378 }
3385 3379
3386 3380 extern int vpm_enable;
3387 3381 /*
3388 3382 * Unload all translations to a page. If the page is a subpage of a large
3389 3383 * page, the large page mappings are also removed.
3390 3384 *
3391 3385 * The forceflags are unused.
3392 3386 */
3393 3387
3394 3388 /*ARGSUSED*/
3395 3389 static int
3396 3390 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3397 3391 {
3398 3392 page_t *cur_pp = pp;
3399 3393 hment_t *hm;
3400 3394 hment_t *prev;
3401 3395 htable_t *ht;
3402 3396 uint_t entry;
3403 3397 level_t level;
3404 3398
3405 3399 XPV_DISALLOW_MIGRATE();
3406 3400
3407 3401 /*
3408 3402 * prevent recursion due to kmem_free()
3409 3403 */
3410 3404 ++curthread->t_hatdepth;
3411 3405 ASSERT(curthread->t_hatdepth < 16);
3412 3406
3413 3407 #if defined(__amd64)
3414 3408 /*
3415 3409 * clear the vpm ref.
3416 3410 */
3417 3411 if (vpm_enable) {
3418 3412 pp->p_vpmref = 0;
3419 3413 }
3420 3414 #endif
3421 3415 /*
3422 3416 * The loop with next_size handles pages with multiple pagesize mappings
3423 3417 */
3424 3418 next_size:
3425 3419 for (;;) {
3426 3420
3427 3421 /*
3428 3422 * Get a mapping list entry
3429 3423 */
3430 3424 x86_hm_enter(cur_pp);
3431 3425 for (prev = NULL; ; prev = hm) {
3432 3426 hm = hment_walk(cur_pp, &ht, &entry, prev);
3433 3427 if (hm == NULL) {
3434 3428 x86_hm_exit(cur_pp);
3435 3429
3436 3430 /*
3437 3431 * If not part of a larger page, we're done.
3438 3432 */
3439 3433 if (cur_pp->p_szc <= pg_szcd) {
3440 3434 ASSERT(curthread->t_hatdepth > 0);
3441 3435 --curthread->t_hatdepth;
3442 3436 XPV_ALLOW_MIGRATE();
3443 3437 return (0);
3444 3438 }
3445 3439
3446 3440 /*
3447 3441 * Else check the next larger page size.
3448 3442 * hat_page_demote() may decrease p_szc
3449 3443 * but that's ok we'll just take an extra
3450 3444 				 * trip to discover there are no larger mappings
3451 3445 * and return.
3452 3446 */
3453 3447 ++pg_szcd;
3454 3448 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3455 3449 goto next_size;
3456 3450 }
3457 3451
3458 3452 /*
3459 3453 * If this mapping size matches, remove it.
3460 3454 */
3461 3455 level = ht->ht_level;
3462 3456 if (level == pg_szcd)
3463 3457 break;
3464 3458 }
3465 3459
3466 3460 /*
3467 3461 * Remove the mapping list entry for this page.
3468 3462 * Note this does the x86_hm_exit() for us.
3469 3463 */
3470 3464 hm = hati_page_unmap(cur_pp, ht, entry);
3471 3465 if (hm != NULL)
3472 3466 hment_free(hm);
3473 3467 }
3474 3468 }
3475 3469
3476 3470 int
3477 3471 hat_pageunload(struct page *pp, uint_t forceflag)
3478 3472 {
3479 3473 ASSERT(PAGE_EXCL(pp));
3480 3474 return (hati_pageunload(pp, 0, forceflag));
3481 3475 }
3482 3476
3483 3477 /*
3484 3478 * Unload all large mappings to pp and reduce by 1 p_szc field of every large
3485 3479 * page level that included pp.
3486 3480 *
3487 3481 * pp must be locked EXCL. Even though no other constituent pages are locked
3488 3482 * it's legal to unload large mappings to pp because all constituent pages of
3489 3483  * large locked mappings have to be locked SHARED. Therefore if we have EXCL
3490 3484 * lock on one of constituent pages none of the large mappings to pp are
3491 3485 * locked.
3492 3486 *
3493 3487 * Change (always decrease) p_szc field starting from the last constituent
3494 3488 * page and ending with root constituent page so that root's pszc always shows
3495 3489 * the area where hat_page_demote() may be active.
3496 3490 *
3497 3491 * This mechanism is only used for file system pages where it's not always
3498 3492 * possible to get EXCL locks on all constituent pages to demote the size code
3499 3493 * (as is done for anonymous or kernel large pages).
3500 3494 */
3501 3495 void
3502 3496 hat_page_demote(page_t *pp)
3503 3497 {
3504 3498 uint_t pszc;
3505 3499 uint_t rszc;
3506 3500 uint_t szc;
3507 3501 page_t *rootpp;
3508 3502 page_t *firstpp;
3509 3503 page_t *lastpp;
3510 3504 pgcnt_t pgcnt;
3511 3505
3512 3506 ASSERT(PAGE_EXCL(pp));
3513 3507 ASSERT(!PP_ISFREE(pp));
3514 3508 ASSERT(page_szc_lock_assert(pp));
3515 3509
3516 3510 if (pp->p_szc == 0)
3517 3511 return;
3518 3512
3519 3513 rootpp = PP_GROUPLEADER(pp, 1);
3520 3514 (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3521 3515
3522 3516 /*
3523 3517 * all large mappings to pp are gone
3524 3518 * and no new can be setup since pp is locked exclusively.
3525 3519 *
3526 3520 * Lock the root to make sure there's only one hat_page_demote()
3527 3521 * outstanding within the area of this root's pszc.
3528 3522 *
3529 3523 * Second potential hat_page_demote() is already eliminated by upper
3530 3524 * VM layer via page_szc_lock() but we don't rely on it and use our
3531 3525 * own locking (so that upper layer locking can be changed without
3532 3526 * assumptions that hat depends on upper layer VM to prevent multiple
3533 3527 * hat_page_demote() to be issued simultaneously to the same large
3534 3528 * page).
3535 3529 */
3536 3530 again:
3537 3531 pszc = pp->p_szc;
3538 3532 if (pszc == 0)
3539 3533 return;
3540 3534 rootpp = PP_GROUPLEADER(pp, pszc);
3541 3535 x86_hm_enter(rootpp);
3542 3536 /*
3543 3537 * If root's p_szc is different from pszc, we raced with another
3544 3538 * hat_page_demote(). Drop the lock and try to find the root again.
3545 3539 * If root's p_szc is greater than pszc, the previous hat_page_demote()
3546 3540 * is not done yet. Take and release the mlist lock of root's root to
3547 3541 * wait for the previous hat_page_demote() to complete.
3548 3542 */
3549 3543 if ((rszc = rootpp->p_szc) != pszc) {
3550 3544 x86_hm_exit(rootpp);
3551 3545 if (rszc > pszc) {
3552 3546 /* p_szc of a locked non free page can't increase */
3553 3547 ASSERT(pp != rootpp);
3554 3548
3555 3549 rootpp = PP_GROUPLEADER(rootpp, rszc);
3556 3550 x86_hm_enter(rootpp);
3557 3551 x86_hm_exit(rootpp);
3558 3552 }
3559 3553 goto again;
3560 3554 }
3561 3555 ASSERT(pp->p_szc == pszc);
3562 3556
3563 3557 /*
3564 3558 * Decrement by 1 the p_szc of every constituent page of a region that
3565 3559 * covered pp. For example, if the original szc is 3 it gets changed to 2
3566 3560 * everywhere except in the region 2 that covered pp. That region 2
3567 3561 * gets demoted to 1 everywhere except in the region 1 that covered pp.
3568 3562 * The region 1 that covered pp is demoted to region 0. It's done this
3569 3563 * way because from region 3 we removed level 3 mappings, from region 2
3570 3564 * that covered pp we removed level 2 mappings and from region 1 that
3571 3565 * covered pp we removed level 1 mappings. All changes are done from
3572 3566 * high pfn's to low pfn's so that roots are changed last, allowing one
3573 3567 * to know the largest region where hat_page_demote() is still active
3574 3568 * by only looking at the root page.
3575 3569 *
3576 3570 * This algorithm is implemented in 2 while loops. First loop changes
3577 3571 * p_szc of pages to the right of pp's level 1 region and second
3578 3572 * loop changes p_szc of pages of level 1 region that covers pp
3579 3573 * and all pages to the left of level 1 region that covers pp.
3580 3574 * In the first loop p_szc keeps dropping with every iteration
3581 3575 * and in the second loop it keeps increasing with every iteration.
3582 3576 *
3583 3577 * First loop description: Demote pages to the right of pp outside of
3584 3578 * level 1 region that covers pp. In every iteration of the while
3585 3579 * loop below find the last page of szc region and the first page of
3586 3580 * (szc - 1) region that is immediately to the right of (szc - 1)
3587 3581 * region that covers pp. From last such page to first such page
3588 3582 * change every page's szc to szc - 1. Decrement szc and continue
3589 3583 * looping until szc is 1. If pp belongs to the last (szc - 1) region
3590 3584 * of szc region skip to the next iteration.
3591 3585 */
3592 3586 szc = pszc;
3593 3587 while (szc > 1) {
3594 3588 lastpp = PP_GROUPLEADER(pp, szc);
3595 3589 pgcnt = page_get_pagecnt(szc);
3596 3590 lastpp += pgcnt - 1;
3597 3591 firstpp = PP_GROUPLEADER(pp, (szc - 1));
3598 3592 pgcnt = page_get_pagecnt(szc - 1);
3599 3593 if (lastpp - firstpp < pgcnt) {
3600 3594 szc--;
3601 3595 continue;
3602 3596 }
3603 3597 firstpp += pgcnt;
3604 3598 while (lastpp != firstpp) {
3605 3599 ASSERT(lastpp->p_szc == pszc);
3606 3600 lastpp->p_szc = szc - 1;
3607 3601 lastpp--;
3608 3602 }
3609 3603 firstpp->p_szc = szc - 1;
3610 3604 szc--;
3611 3605 }
3612 3606
3613 3607 /*
3614 3608 * Second loop description:
3615 3609 * First iteration changes p_szc to 0 of every
3616 3610 * page of level 1 region that covers pp.
3617 3611 * Subsequent iterations find last page of szc region
3618 3612 * immediately to the left of szc region that covered pp
3619 3613 * and first page of (szc + 1) region that covers pp.
3620 3614 * From last to first page change p_szc of every page to szc.
3621 3615 * Increment szc and continue looping until szc is pszc.
3622 3616 * If pp belongs to the first szc region of the (szc + 1) region,
3623 3617 * skip to the next iteration.
3624 3618 *
3625 3619 */
3626 3620 szc = 0;
3627 3621 while (szc < pszc) {
3628 3622 firstpp = PP_GROUPLEADER(pp, (szc + 1));
3629 3623 if (szc == 0) {
3630 3624 pgcnt = page_get_pagecnt(1);
3631 3625 lastpp = firstpp + (pgcnt - 1);
3632 3626 } else {
3633 3627 lastpp = PP_GROUPLEADER(pp, szc);
3634 3628 if (firstpp == lastpp) {
3635 3629 szc++;
3636 3630 continue;
3637 3631 }
3638 3632 lastpp--;
3639 3633 pgcnt = page_get_pagecnt(szc);
3640 3634 }
3641 3635 while (lastpp != firstpp) {
3642 3636 ASSERT(lastpp->p_szc == pszc);
3643 3637 lastpp->p_szc = szc;
3644 3638 lastpp--;
3645 3639 }
3646 3640 firstpp->p_szc = szc;
3647 3641 if (firstpp == rootpp)
3648 3642 break;
3649 3643 szc++;
3650 3644 }
3651 3645 x86_hm_exit(rootpp);
3652 3646 }
3653 3647
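The two p_szc-adjustment loops in hat_page_demote() above are easiest to see on a small example. The following stand-alone sketch restates those loops on a plain array; the 4-pages-per-level factor, the 16-page region, and the chosen pp index are assumptions made for readability (the real code works on page_t's via page_get_pagecnt() and PP_GROUPLEADER()), so treat it as an illustration rather than the implementation.

#include <stdio.h>

#define	NPAGES	16	/* pagecnt(pszc) for pszc == 2 in this toy model */

static int pagecnt(int szc) { int c = 1; while (szc-- > 0) c *= 4; return (c); }
static int leader(int pfn, int szc) { return (pfn & ~(pagecnt(szc) - 1)); }

int
main(void)
{
	int p_szc[NPAGES];
	int pp = 13, pszc = 2, rootpp = leader(pp, pszc);
	int szc, firstpp, lastpp, i;

	for (i = 0; i < NPAGES; i++)
		p_szc[i] = pszc;

	/* first loop: demote pages to the right of pp's level 1 region */
	szc = pszc;
	while (szc > 1) {
		lastpp = leader(pp, szc) + pagecnt(szc) - 1;
		firstpp = leader(pp, szc - 1);
		if (lastpp - firstpp < pagecnt(szc - 1)) {
			szc--;
			continue;
		}
		firstpp += pagecnt(szc - 1);
		while (lastpp >= firstpp)	/* high pfn's first, leader last */
			p_szc[lastpp--] = szc - 1;
		szc--;
	}

	/* second loop: pp's level 1 region, then everything to its left */
	szc = 0;
	while (szc < pszc) {
		firstpp = leader(pp, szc + 1);
		if (szc == 0) {
			lastpp = firstpp + pagecnt(1) - 1;
		} else {
			lastpp = leader(pp, szc);
			if (firstpp == lastpp) {
				szc++;
				continue;
			}
			lastpp--;
		}
		while (lastpp >= firstpp)	/* again high to low, root last */
			p_szc[lastpp--] = szc;
		if (firstpp == rootpp)
			break;
		szc++;
	}

	for (i = 0; i < NPAGES; i++)
		printf("pfn %2d  p_szc %d\n", i, p_szc[i]);
	return (0);
}

For pp = 13 the level 1 region 12..15 ends up with p_szc 0, the rest of the level 2 region drops to 1, and the root page (pfn 0) is written last, matching the ordering described in the comments above.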
3654 3648 /*
3655 3649 * Get hw stats from hardware into the page struct and reset the hw stats.
3656 3650 * Returns the attributes of the page.
3657 3651 * Flags for hat_pagesync, hat_getstat, hat_sync
3658 3652 *
3659 3653 * define HAT_SYNC_ZERORM 0x01
3660 3654 *
3661 3655 * Additional flags for hat_pagesync
3662 3656 *
3663 3657 * define HAT_SYNC_STOPON_REF 0x02
3664 3658 * define HAT_SYNC_STOPON_MOD 0x04
3665 3659 * define HAT_SYNC_STOPON_RM 0x06
3666 3660 * define HAT_SYNC_STOPON_SHARED 0x08
3667 3661 */
3668 3662 uint_t
3669 3663 hat_pagesync(struct page *pp, uint_t flags)
3670 3664 {
3671 3665 hment_t *hm = NULL;
3672 3666 htable_t *ht;
3673 3667 uint_t entry;
3674 3668 x86pte_t old, save_old;
3675 3669 x86pte_t new;
3676 3670 uchar_t nrmbits = P_REF|P_MOD|P_RO;
3677 3671 extern ulong_t po_share;
3678 3672 page_t *save_pp = pp;
3679 3673 uint_t pszc = 0;
3680 3674
3681 3675 ASSERT(PAGE_LOCKED(pp) || panicstr);
3682 3676
3683 3677 if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3684 3678 return (pp->p_nrm & nrmbits);
3685 3679
3686 3680 if ((flags & HAT_SYNC_ZERORM) == 0) {
3687 3681
3688 3682 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3689 3683 return (pp->p_nrm & nrmbits);
3690 3684
3691 3685 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3692 3686 return (pp->p_nrm & nrmbits);
3693 3687
3694 3688 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3695 3689 hat_page_getshare(pp) > po_share) {
3696 3690 if (PP_ISRO(pp))
3697 3691 PP_SETREF(pp);
3698 3692 return (pp->p_nrm & nrmbits);
3699 3693 }
3700 3694 }
3701 3695
3702 3696 XPV_DISALLOW_MIGRATE();
3703 3697 next_size:
3704 3698 /*
3705 3699 * walk thru the mapping list syncing (and clearing) ref/mod bits.
3706 3700 */
3707 3701 x86_hm_enter(pp);
3708 3702 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3709 3703 if (ht->ht_level < pszc)
3710 3704 continue;
3711 3705 old = x86pte_get(ht, entry);
3712 3706 try_again:
3713 3707
3714 3708 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3715 3709
3716 3710 if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3717 3711 continue;
3718 3712
3719 3713 save_old = old;
3720 3714 if ((flags & HAT_SYNC_ZERORM) != 0) {
3721 3715
3722 3716 /*
3723 3717 * Need to clear ref or mod bits. Need to demap
3724 3718 * to make sure any executing TLBs see cleared bits.
3725 3719 */
3726 3720 new = old;
3727 3721 PTE_CLR(new, PT_REF | PT_MOD);
3728 3722 old = hati_update_pte(ht, entry, old, new);
3729 3723 if (old != 0)
3730 3724 goto try_again;
3731 3725
3732 3726 old = save_old;
3733 3727 }
3734 3728
3735 3729 /*
3736 3730 * Sync the PTE
3737 3731 */
3738 3732 if (!(flags & HAT_SYNC_ZERORM) &&
3739 3733 PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
3740 3734 hati_sync_pte_to_page(pp, old, ht->ht_level);
3741 3735
3742 3736 /*
3743 3737 * can stop short if we found a ref'd or mod'd page
3744 3738 */
3745 3739 if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
3746 3740 (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
3747 3741 x86_hm_exit(pp);
3748 3742 goto done;
3749 3743 }
3750 3744 }
3751 3745 x86_hm_exit(pp);
3752 3746 while (pszc < pp->p_szc) {
3753 3747 page_t *tpp;
3754 3748 pszc++;
3755 3749 tpp = PP_GROUPLEADER(pp, pszc);
3756 3750 if (pp != tpp) {
3757 3751 pp = tpp;
3758 3752 goto next_size;
3759 3753 }
3760 3754 }
3761 3755 done:
3762 3756 XPV_ALLOW_MIGRATE();
3763 3757 return (save_pp->p_nrm & nrmbits);
3764 3758 }
3765 3759
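A hedged sketch of how a caller typically combines the flags listed above with hat_pagesync(). Kernel context is assumed, pp stands for a page_t the caller already has locked, and the write-back step is only a placeholder comment.

	uint_t nrm;

	/* gather ref/mod state, stopping the walk once a modified PTE is seen */
	nrm = hat_pagesync(pp, HAT_SYNC_STOPON_MOD);
	if (nrm & P_MOD) {
		/* ... schedule the dirty page for write-back here ... */

		/* then clear the hardware ref/mod bits so the next scan starts clean */
		(void) hat_pagesync(pp, HAT_SYNC_ZERORM);
	}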
3766 3760 /*
3767 3761 * Returns the approximate number of mappings to this pp. A return of 0 implies
3768 3762 * there are no mappings to the page.
3769 3763 */
3770 3764 ulong_t
3771 3765 hat_page_getshare(page_t *pp)
3772 3766 {
3773 3767 uint_t cnt;
3774 3768 cnt = hment_mapcnt(pp);
3775 3769 #if defined(__amd64)
3776 3770 if (vpm_enable && pp->p_vpmref) {
3777 3771 cnt += 1;
3778 3772 }
3779 3773 #endif
3780 3774 return (cnt);
3781 3775 }
3782 3776
3783 3777 /*
3784 3778 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
3785 3779 * otherwise.
3786 3780 */
3787 3781 int
3788 3782 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3789 3783 {
3790 3784 return (hat_page_getshare(pp) > sh_thresh);
3791 3785 }
3792 3786
3793 3787 /*
3794 3788 * hat_softlock isn't supported anymore
3795 3789 */
3796 3790 /*ARGSUSED*/
3797 3791 faultcode_t
3798 3792 hat_softlock(
3799 3793 hat_t *hat,
3800 3794 caddr_t addr,
3801 3795 size_t *len,
3802 3796 struct page **page_array,
3803 3797 uint_t flags)
3804 3798 {
3805 3799 return (FC_NOSUPPORT);
3806 3800 }
3807 3801
3808 3802
3809 3803
3810 3804 /*
3811 3805 * Routine to expose supported HAT features to platform independent code.
3812 3806 */
3813 3807 /*ARGSUSED*/
3814 3808 int
3815 3809 hat_supported(enum hat_features feature, void *arg)
3816 3810 {
3817 3811 switch (feature) {
3818 3812
3819 3813 case HAT_SHARED_PT: /* this is really ISM */
3820 3814 return (1);
3821 3815
3822 3816 case HAT_DYNAMIC_ISM_UNMAP:
3823 3817 return (0);
3824 3818
3825 3819 case HAT_VMODSORT:
3826 3820 return (1);
3827 3821
3828 3822 case HAT_SHARED_REGIONS:
3829 3823 return (0);
3830 3824
3831 3825 default:
3832 3826 panic("hat_supported() - unknown feature");
3833 3827 }
3834 3828 return (0);
3835 3829 }
3836 3830
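A short, hedged example of how platform-independent callers are expected to probe hat_supported() before relying on a feature; the branch bodies are placeholders, and the arg parameter is unused on x86 as the routine above shows.

	if (hat_supported(HAT_SHARED_PT, NULL)) {
		/* ISM-style shared page tables may be used on this platform */
	}

	if (!hat_supported(HAT_DYNAMIC_ISM_UNMAP, NULL)) {
		/* ISM mappings can only be torn down when the segment is detached */
	}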
3837 3831 /*
3838 3832 * Called when a thread is exiting and has been switched to the kernel AS
3839 3833 */
3840 3834 void
3841 3835 hat_thread_exit(kthread_t *thd)
3842 3836 {
3843 3837 ASSERT(thd->t_procp->p_as == &kas);
3844 3838 XPV_DISALLOW_MIGRATE();
3845 3839 hat_switch(thd->t_procp->p_as->a_hat);
3846 3840 XPV_ALLOW_MIGRATE();
3847 3841 }
3848 3842
3849 3843 /*
3850 3844 * Set up the given brand new hat structure as the new HAT on this cpu's mmu.
3851 3845 */
3852 3846 /*ARGSUSED*/
3853 3847 void
3854 3848 hat_setup(hat_t *hat, int flags)
3855 3849 {
3856 3850 XPV_DISALLOW_MIGRATE();
3857 3851 kpreempt_disable();
3858 3852
3859 3853 hat_switch(hat);
3860 3854
3861 3855 kpreempt_enable();
3862 3856 XPV_ALLOW_MIGRATE();
3863 3857 }
3864 3858
3865 3859 /*
3866 3860 * Prepare for a CPU private mapping for the given address.
3867 3861 *
3868 3862 * The address can only be used from a single CPU and can be remapped
3869 3863 * using hat_mempte_remap(). Return the address of the PTE.
3870 3864 *
3871 3865 * We do the htable_create() if necessary and increment the valid count so
3872 3866 * the htable can't disappear. We also hat_devload() the page table into
3873 3867 * kernel so that the PTE is quickly accessed.
3874 3868 */
3875 3869 hat_mempte_t
3876 3870 hat_mempte_setup(caddr_t addr)
3877 3871 {
3878 3872 uintptr_t va = (uintptr_t)addr;
3879 3873 htable_t *ht;
3880 3874 uint_t entry;
3881 3875 x86pte_t oldpte;
3882 3876 hat_mempte_t p;
3883 3877
3884 3878 ASSERT(IS_PAGEALIGNED(va));
3885 3879 ASSERT(!IN_VA_HOLE(va));
3886 3880 ++curthread->t_hatdepth;
3887 3881 XPV_DISALLOW_MIGRATE();
3888 3882 ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3889 3883 if (ht == NULL) {
3890 3884 ht = htable_create(kas.a_hat, va, 0, NULL);
3891 3885 entry = htable_va2entry(va, ht);
3892 3886 ASSERT(ht->ht_level == 0);
3893 3887 oldpte = x86pte_get(ht, entry);
3894 3888 }
3895 3889 if (PTE_ISVALID(oldpte))
3896 3890 panic("hat_mempte_setup(): address already mapped "
3897 3891 "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
3898 3892
3899 3893 /*
3900 3894 * increment ht_valid_cnt so that the pagetable can't disappear
3901 3895 */
3902 3896 HTABLE_INC(ht->ht_valid_cnt);
3903 3897
3904 3898 /*
3905 3899 * return the PTE physical address to the caller.
3906 3900 */
3907 3901 htable_release(ht);
3908 3902 XPV_ALLOW_MIGRATE();
3909 3903 p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3910 3904 --curthread->t_hatdepth;
3911 3905 return (p);
3912 3906 }
3913 3907
3914 3908 /*
3915 3909 * Release a CPU private mapping for the given address.
3916 3910 * We decrement the htable valid count so it might be destroyed.
3917 3911 */
3918 3912 /*ARGSUSED1*/
3919 3913 void
3920 3914 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
3921 3915 {
3922 3916 htable_t *ht;
3923 3917
3924 3918 XPV_DISALLOW_MIGRATE();
3925 3919 /*
3926 3920 * invalidate any left over mapping and decrement the htable valid count
3927 3921 */
3928 3922 #ifdef __xpv
3929 3923 if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
3930 3924 UVMF_INVLPG | UVMF_LOCAL))
3931 3925 panic("HYPERVISOR_update_va_mapping() failed");
3932 3926 #else
3933 3927 {
3934 3928 x86pte_t *pteptr;
3935 3929
3936 3930 pteptr = x86pte_mapin(mmu_btop(pte_pa),
3937 3931 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3938 3932 if (mmu.pae_hat)
3939 3933 *pteptr = 0;
3940 3934 else
3941 3935 *(x86pte32_t *)pteptr = 0;
3942 3936 mmu_tlbflush_entry(addr);
3943 3937 x86pte_mapout();
3944 3938 }
3945 3939 #endif
3946 3940
3947 3941 ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3948 3942 if (ht == NULL)
3949 3943 panic("hat_mempte_release(): invalid address");
3950 3944 ASSERT(ht->ht_level == 0);
3951 3945 HTABLE_DEC(ht->ht_valid_cnt);
3952 3946 htable_release(ht);
3953 3947 XPV_ALLOW_MIGRATE();
3954 3948 }
3955 3949
3956 3950 /*
3957 3951 * Apply a temporary CPU private mapping to a page. We flush the TLB only
3958 3952 * on this CPU, so this ought to have been called with preemption disabled.
3959 3953 */
3960 3954 void
3961 3955 hat_mempte_remap(
3962 3956 pfn_t pfn,
3963 3957 caddr_t addr,
3964 3958 hat_mempte_t pte_pa,
3965 3959 uint_t attr,
3966 3960 uint_t flags)
3967 3961 {
3968 3962 uintptr_t va = (uintptr_t)addr;
3969 3963 x86pte_t pte;
3970 3964
3971 3965 /*
3972 3966 * Remap the given PTE to the new page's PFN. Invalidate only
3973 3967 * on this CPU.
3974 3968 */
3975 3969 #ifdef DEBUG
3976 3970 htable_t *ht;
3977 3971 uint_t entry;
3978 3972
3979 3973 ASSERT(IS_PAGEALIGNED(va));
3980 3974 ASSERT(!IN_VA_HOLE(va));
3981 3975 ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
3982 3976 ASSERT(ht != NULL);
3983 3977 ASSERT(ht->ht_level == 0);
3984 3978 ASSERT(ht->ht_valid_cnt > 0);
3985 3979 ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
3986 3980 htable_release(ht);
3987 3981 #endif
3988 3982 XPV_DISALLOW_MIGRATE();
3989 3983 pte = hati_mkpte(pfn, attr, 0, flags);
3990 3984 #ifdef __xpv
3991 3985 if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
3992 3986 panic("HYPERVISOR_update_va_mapping() failed");
3993 3987 #else
3994 3988 {
3995 3989 x86pte_t *pteptr;
3996 3990
3997 3991 pteptr = x86pte_mapin(mmu_btop(pte_pa),
3998 3992 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3999 3993 if (mmu.pae_hat)
4000 3994 *(x86pte_t *)pteptr = pte;
4001 3995 else
4002 3996 *(x86pte32_t *)pteptr = (x86pte32_t)pte;
4003 3997 mmu_tlbflush_entry(addr);
4004 3998 x86pte_mapout();
4005 3999 }
4006 4000 #endif
4007 4001 XPV_ALLOW_MIGRATE();
4008 4002 }
4009 4003
4010 4004
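A hedged usage sketch for the three hat_mempte_* routines above, assuming kernel context. scratch_va and pfn are hypothetical placeholders, and the attribute/flag choices are illustrative only, not taken from an actual caller.

	hat_mempte_t pte_pa;

	/* once, at initialization: reserve a private PTE slot for scratch_va */
	pte_pa = hat_mempte_setup(scratch_va);

	/*
	 * per use: retarget the PTE; only the local TLB entry is invalidated,
	 * so the thread must stay on this CPU for the duration
	 */
	kpreempt_disable();
	hat_mempte_remap(pfn, scratch_va, pte_pa, PROT_READ | PROT_WRITE, 0);
	/* ... access the frame through scratch_va ... */
	kpreempt_enable();

	/* at teardown: drop the htable reference taken by hat_mempte_setup() */
	hat_mempte_release(scratch_va, pte_pa);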
4011 4005
4012 4006 /*
4013 4007 * Hat locking functions
4014 4008 * XXX - these two functions are currently being used by hatstats
4015 4009 * they can be removed by using a per-as mutex for hatstats.
4016 4010 */
4017 4011 void
4018 4012 hat_enter(hat_t *hat)
4019 4013 {
4020 4014 mutex_enter(&hat->hat_mutex);
4021 4015 }
4022 4016
4023 4017 void
4024 4018 hat_exit(hat_t *hat)
4025 4019 {
4026 4020 mutex_exit(&hat->hat_mutex);
4027 4021 }
4028 4022
4029 4023 /*
4030 4024 * HAT part of cpu initialization.
4031 4025 */
4032 4026 void
4033 4027 hat_cpu_online(struct cpu *cpup)
4034 4028 {
4035 4029 if (cpup != CPU) {
4036 4030 x86pte_cpu_init(cpup);
4037 4031 hat_vlp_setup(cpup);
4038 4032 }
4039 4033 CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
4040 4034 }
4041 4035
4042 4036 /*
4043 4037 * HAT part of cpu deletion.
4044 4038 * (currently, we only call this after the cpu is safely passivated.)
4045 4039 */
4046 4040 void
4047 4041 hat_cpu_offline(struct cpu *cpup)
4048 4042 {
4049 4043 ASSERT(cpup != CPU);
4050 4044
4051 4045 CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
4052 4046 hat_vlp_teardown(cpup);
4053 4047 x86pte_cpu_fini(cpup);
4054 4048 }
4055 4049
4056 4050 /*
4057 4051 * Function called after all CPUs are brought online.
4058 4052 * Used to remove low address boot mappings.
4059 4053 */
4060 4054 void
4061 4055 clear_boot_mappings(uintptr_t low, uintptr_t high)
4062 4056 {
4063 4057 uintptr_t vaddr = low;
4064 4058 htable_t *ht = NULL;
4065 4059 level_t level;
4066 4060 uint_t entry;
4067 4061 x86pte_t pte;
4068 4062
4069 4063 /*
4070 4064 * On the 1st CPU we can unload the prom mappings; basically we blow away
4071 4065 * all virtual mappings under _userlimit.
4072 4066 */
4073 4067 while (vaddr < high) {
4074 4068 pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
4075 4069 if (ht == NULL)
4076 4070 break;
4077 4071
4078 4072 level = ht->ht_level;
4079 4073 entry = htable_va2entry(vaddr, ht);
4080 4074 ASSERT(level <= mmu.max_page_level);
4081 4075 ASSERT(PTE_ISPAGE(pte, level));
4082 4076
4083 4077 /*
4084 4078 * Unload the mapping from the page tables.
4085 4079 */
4086 4080 (void) x86pte_inval(ht, entry, 0, NULL, B_TRUE);
4087 4081 ASSERT(ht->ht_valid_cnt > 0);
4088 4082 HTABLE_DEC(ht->ht_valid_cnt);
4089 4083 PGCNT_DEC(ht->ht_hat, ht->ht_level);
4090 4084
4091 4085 vaddr += LEVEL_SIZE(ht->ht_level);
4092 4086 }
4093 4087 if (ht)
4094 4088 htable_release(ht);
4095 4089 }
4096 4090
4097 4091 /*
4098 4092 * Atomically update a new translation for a single page. If the
4099 4093 * currently installed PTE doesn't match the value we expect to find,
4100 4094 * it's not updated and we return the PTE we found.
4101 4095 *
4102 4096 * If activating nosync or NOWRITE and the page was modified, we need to sync
4103 4097 * with the page_t. Also sync with the page_t if clearing ref/mod bits.
4104 4098 */
4105 4099 static x86pte_t
4106 4100 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
4107 4101 {
4108 4102 page_t *pp;
4109 4103 uint_t rm = 0;
4110 4104 x86pte_t replaced;
4111 4105
4112 4106 if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
4113 4107 PTE_GET(expected, PT_MOD | PT_REF) &&
4114 4108 (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
4115 4109 !PTE_GET(new, PT_MOD | PT_REF))) {
4116 4110
4117 4111 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
4118 4112 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
4119 4113 ASSERT(pp != NULL);
4120 4114 if (PTE_GET(expected, PT_MOD))
4121 4115 rm |= P_MOD;
4122 4116 if (PTE_GET(expected, PT_REF))
4123 4117 rm |= P_REF;
4124 4118 PTE_CLR(new, PT_MOD | PT_REF);
4125 4119 }
4126 4120
4127 4121 replaced = x86pte_update(ht, entry, expected, new);
4128 4122 if (replaced != expected)
4129 4123 return (replaced);
4130 4124
4131 4125 if (rm) {
4132 4126 /*
4133 4127 * sync to all constituent pages of a large page
4134 4128 */
4135 4129 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
4136 4130 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
4137 4131 while (pgcnt-- > 0) {
4138 4132 /*
4139 4133 * hat_page_demote() can't decrease
4140 4134 * pszc below this mapping size
4141 4135 * since large mapping existed after we
4142 4136 * took mlist lock.
4143 4137 */
4144 4138 ASSERT(pp->p_szc >= ht->ht_level);
4145 4139 hat_page_setattr(pp, rm);
4146 4140 ++pp;
4147 4141 }
4148 4142 }
4149 4143
4150 4144 return (0);
4151 4145 }
4152 4146
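The try_again loop in hat_pagesync() above relies on hati_update_pte() returning 0 on success and the newly observed PTE on a lost race. The stand-alone sketch below re-creates that retry pattern with C11 atomics; the PTE value and helper names are made up for illustration, and x86pte_update() is replaced by a plain compare-and-swap.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define	PT_REF	0x20	/* accessed bit */
#define	PT_MOD	0x40	/* dirty bit */

static _Atomic uint64_t fake_pte = 0x200067;	/* valid PTE with ref/mod set */

/* returns 0 if the swap happened, otherwise the value actually found */
static uint64_t
update_pte(uint64_t expected, uint64_t new)
{
	if (atomic_compare_exchange_strong(&fake_pte, &expected, new))
		return (0);
	return (expected);
}

int
main(void)
{
	uint64_t old = atomic_load(&fake_pte);
	uint64_t new, found;

	for (;;) {
		new = old & ~(uint64_t)(PT_REF | PT_MOD);
		found = update_pte(old, new);
		if (found == 0)
			break;		/* ref/mod cleared atomically */
		old = found;		/* lost a race; retry with fresh value */
	}
	printf("pte is now %#llx\n",
	    (unsigned long long)atomic_load(&fake_pte));
	return (0);
}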
4153 4147 /* ARGSUSED */
4154 4148 void
4155 4149 hat_join_srd(struct hat *hat, vnode_t *evp)
4156 4150 {
4157 4151 }
4158 4152
4159 4153 /* ARGSUSED */
4160 4154 hat_region_cookie_t
4161 4155 hat_join_region(struct hat *hat,
4162 4156 caddr_t r_saddr,
4163 4157 size_t r_size,
4164 4158 void *r_obj,
4165 4159 u_offset_t r_objoff,
4166 4160 uchar_t r_perm,
4167 4161 uchar_t r_pgszc,
4168 4162 hat_rgn_cb_func_t r_cb_function,
4169 4163 uint_t flags)
4170 4164 {
4171 4165 panic("No shared region support on x86");
4172 4166 return (HAT_INVALID_REGION_COOKIE);
4173 4167 }
4174 4168
4175 4169 /* ARGSUSED */
4176 4170 void
4177 4171 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4178 4172 {
4179 4173 panic("No shared region support on x86");
4180 4174 }
4181 4175
4182 4176 /* ARGSUSED */
4183 4177 void
4184 4178 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4185 4179 {
4186 4180 panic("No shared region support on x86");
4187 4181 }
4188 4182
4189 4183
4190 4184 /*
4191 4185 * Kernel Physical Mapping (kpm) facility
4192 4186 *
4193 4187 * Most of the routines needed to support segkpm are almost no-ops on the
4194 4188 * x86 platform. We map in the entire segment when it is created and leave
4195 4189 * it mapped in, so there is no additional work required to set up and tear
4196 4190 * down individual mappings. All of these routines were created to support
4197 4191 * SPARC platforms that have to avoid aliasing in their virtually indexed
4198 4192 * caches.
4199 4193 *
4200 4194 * Most of the routines have sanity checks in them (e.g. verifying that the
4201 4195 * passed-in page is locked). We don't actually care about most of these
4202 4196 * checks on x86, but we leave them in place to identify problems in the
4203 4197 * upper levels.
4204 4198 */
4205 4199
4206 4200 /*
4207 4201 * Map in a locked page and return the vaddr.
4208 4202 */
4209 4203 /*ARGSUSED*/
4210 4204 caddr_t
4211 4205 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
4212 4206 {
4213 4207 caddr_t vaddr;
4214 4208
4215 4209 #ifdef DEBUG
4216 4210 if (kpm_enable == 0) {
4217 4211 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
4218 4212 return ((caddr_t)NULL);
4219 4213 }
4220 4214
4221 4215 if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4222 4216 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
4223 4217 return ((caddr_t)NULL);
4224 4218 }
4225 4219 #endif
4226 4220
4227 4221 vaddr = hat_kpm_page2va(pp, 1);
4228 4222
4229 4223 return (vaddr);
4230 4224 }
4231 4225
4232 4226 /*
4233 4227 * Mapout a locked page.
4234 4228 */
4235 4229 /*ARGSUSED*/
4236 4230 void
4237 4231 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
4238 4232 {
4239 4233 #ifdef DEBUG
4240 4234 if (kpm_enable == 0) {
4241 4235 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
4242 4236 return;
4243 4237 }
4244 4238
4245 4239 if (IS_KPM_ADDR(vaddr) == 0) {
4246 4240 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
4247 4241 return;
4248 4242 }
4249 4243
4250 4244 if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4251 4245 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
4252 4246 return;
4253 4247 }
4254 4248 #endif
4255 4249 }
4256 4250
4257 4251 /*
4258 4252 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
4259 4253 * memory addresses that are not described by a page_t. It can
4260 4254 * also be used for normal pages that are not locked, but beware
4261 4255 * this is dangerous - no locking is performed, so the identity of
4262 4256 * the page could change. hat_kpm_mapin_pfn is not supported when
4263 4257 * vac_colors > 1, because the chosen va depends on the page identity,
4264 4258 * which could change.
4265 4259 * The caller must only pass pfn's for valid physical addresses; violation
4266 4260 * of this rule will cause panic.
4267 4261 */
4268 4262 caddr_t
4269 4263 hat_kpm_mapin_pfn(pfn_t pfn)
4270 4264 {
4271 4265 caddr_t paddr, vaddr;
4272 4266
4273 4267 if (kpm_enable == 0)
4274 4268 return ((caddr_t)NULL);
4275 4269
4276 4270 paddr = (caddr_t)ptob(pfn);
4277 4271 vaddr = (uintptr_t)kpm_vbase + paddr;
4278 4272
4279 4273 return ((caddr_t)vaddr);
4280 4274 }
4281 4275
4282 4276 /*ARGSUSED*/
4283 4277 void
4284 4278 hat_kpm_mapout_pfn(pfn_t pfn)
4285 4279 {
4286 4280 /* empty */
4287 4281 }
4288 4282
4289 4283 /*
4290 4284 * Return the kpm virtual address for a specific pfn
4291 4285 */
4292 4286 caddr_t
4293 4287 hat_kpm_pfn2va(pfn_t pfn)
4294 4288 {
4295 4289 uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
4296 4290
4297 4291 ASSERT(!pfn_is_foreign(pfn));
4298 4292 return ((caddr_t)vaddr);
4299 4293 }
4300 4294
4301 4295 /*
4302 4296 * Return the kpm virtual address for the page at pp.
4303 4297 */
4304 4298 /*ARGSUSED*/
4305 4299 caddr_t
4306 4300 hat_kpm_page2va(struct page *pp, int checkswap)
4307 4301 {
4308 4302 return (hat_kpm_pfn2va(pp->p_pagenum));
4309 4303 }
4310 4304
4311 4305 /*
4312 4306 * Return the page frame number for the kpm virtual address vaddr.
4313 4307 */
4314 4308 pfn_t
4315 4309 hat_kpm_va2pfn(caddr_t vaddr)
4316 4310 {
4317 4311 pfn_t pfn;
4318 4312
4319 4313 ASSERT(IS_KPM_ADDR(vaddr));
4320 4314
4321 4315 pfn = (pfn_t)btop(vaddr - kpm_vbase);
4322 4316
4323 4317 return (pfn);
4324 4318 }
4325 4319
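A stand-alone sketch of the linear pfn/va arithmetic used by hat_kpm_pfn2va() and hat_kpm_va2pfn() above: the segment maps all of physical memory at kpm_vbase, so the conversion is pure shifting. The base address and page shift here are made-up constants; the real values come from kpm_vbase and the MMU parameters.

#include <stdint.h>
#include <stdio.h>

#define	PAGESHIFT	12			/* 4K base pages */

static const uint64_t kpm_base = 0xfffffd0000000000ULL;	/* hypothetical */

static uint64_t pfn2va(uint64_t pfn) { return (kpm_base + (pfn << PAGESHIFT)); }
static uint64_t va2pfn(uint64_t va)  { return ((va - kpm_base) >> PAGESHIFT); }

int
main(void)
{
	uint64_t pfn = 0x1234;
	uint64_t va = pfn2va(pfn);

	printf("pfn %#llx -> va %#llx -> pfn %#llx\n",
	    (unsigned long long)pfn, (unsigned long long)va,
	    (unsigned long long)va2pfn(va));
	return (0);
}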
4326 4320
4327 4321 /*
4328 4322 * Return the page for the kpm virtual address vaddr.
4329 4323 */
4330 4324 page_t *
4331 4325 hat_kpm_vaddr2page(caddr_t vaddr)
4332 4326 {
4333 4327 pfn_t pfn;
4334 4328
4335 4329 ASSERT(IS_KPM_ADDR(vaddr));
4336 4330
4337 4331 pfn = hat_kpm_va2pfn(vaddr);
4338 4332
4339 4333 return (page_numtopp_nolock(pfn));
4340 4334 }
4341 4335
4342 4336 /*
4343 4337 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4344 4338 * KPM page. This should never happen on x86.
4345 4339 */
4346 4340 int
4347 4341 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4348 4342 {
4349 4343 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p",
4350 4344 (void *)hat, (void *)vaddr);
4351 4345
4352 4346 return (0);
4353 4347 }
4354 4348
4355 4349 /*ARGSUSED*/
4356 4350 void
4357 4351 hat_kpm_mseghash_clear(int nentries)
4358 4352 {}
4359 4353
4360 4354 /*ARGSUSED*/
4361 4355 void
4362 4356 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4363 4357 {}
4364 4358
4365 4359 #ifndef __xpv
4366 4360 void
4367 4361 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
4368 4362 offset_t kpm_pages_off)
4369 4363 {
4370 4364 _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
4371 4365 pfn_t base, end;
4372 4366
4373 4367 /*
4374 4368 * kphysm_add_memory_dynamic() does not set nkpmpgs
4375 4369 * when page_t memory is externally allocated. That
4376 4370 * code must properly calculate nkpmpgs in all cases
4377 4371 * if nkpmpgs needs to be used at some point.
4378 4372 */
4379 4373
4380 4374 /*
4381 4375 * The meta (page_t) pages for dynamically added memory are allocated
4382 4376 * either from the incoming memory itself or from existing memory.
4383 4377 * In the former case the base of the incoming pages will be different
4384 4378 * than the base of the dynamic segment so call memseg_get_start() to
4385 4379 * get the actual base of the incoming memory for each case.
4386 4380 */
4387 4381
4388 4382 base = memseg_get_start(msp);
4389 4383 end = msp->pages_end;
4390 4384
4391 4385 hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
4392 4386 mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
4393 4387 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
4394 4388 }
4395 4389
4396 4390 void
4397 4391 hat_kpm_addmem_mseg_insert(struct memseg *msp)
4398 4392 {
4399 4393 _NOTE(ARGUNUSED(msp));
4400 4394 }
4401 4395
4402 4396 void
4403 4397 hat_kpm_addmem_memsegs_update(struct memseg *msp)
4404 4398 {
4405 4399 _NOTE(ARGUNUSED(msp));
4406 4400 }
4407 4401
4408 4402 /*
4409 4403 * Return end of metadata for an already setup memseg.
4410 4404 * X86 platforms don't need per-page meta data to support kpm.
4411 4405 */
4412 4406 caddr_t
4413 4407 hat_kpm_mseg_reuse(struct memseg *msp)
4414 4408 {
4415 4409 return ((caddr_t)msp->epages);
4416 4410 }
4417 4411
4418 4412 void
4419 4413 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
4420 4414 {
4421 4415 _NOTE(ARGUNUSED(msp, mspp));
4422 4416 ASSERT(0);
4423 4417 }
4424 4418
4425 4419 void
4426 4420 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
4427 4421 struct memseg *lo, struct memseg *mid, struct memseg *hi)
4428 4422 {
4429 4423 _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
4430 4424 ASSERT(0);
4431 4425 }
4432 4426
4433 4427 /*
4434 4428 * Walk the memsegs chain, applying func to each memseg span.
4435 4429 */
4436 4430 void
4437 4431 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
4438 4432 {
4439 4433 pfn_t pbase, pend;
4440 4434 void *base;
4441 4435 size_t size;
4442 4436 struct memseg *msp;
4443 4437
4444 4438 for (msp = memsegs; msp; msp = msp->next) {
4445 4439 pbase = msp->pages_base;
4446 4440 pend = msp->pages_end;
4447 4441 base = ptob(pbase) + kpm_vbase;
4448 4442 size = ptob(pend - pbase);
4449 4443 func(arg, base, size);
4450 4444 }
4451 4445 }
4452 4446
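A hedged, kernel-context sketch of using hat_kpm_walk() above to total the physical memory covered by the kpm segment; the callback and function names are hypothetical.

	static void
	kpm_count_span(void *arg, void *base, size_t size)
	{
		_NOTE(ARGUNUSED(base));
		*(size_t *)arg += size;		/* accumulate the span length */
	}

	static size_t
	kpm_mapped_bytes(void)
	{
		size_t total = 0;

		hat_kpm_walk(kpm_count_span, &total);
		return (total);
	}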
4453 4447 #else /* __xpv */
4454 4448
4455 4449 /*
4456 4450 * There are specific Hypervisor calls to establish and remove mappings
4457 4451 * to grant table references and the privcmd driver. We have to ensure
4458 4452 * that a page table actually exists.
4459 4453 */
4460 4454 void
4461 4455 hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
4462 4456 {
4463 4457 maddr_t base_ma;
4464 4458 htable_t *ht;
4465 4459 uint_t entry;
4466 4460
4467 4461 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4468 4462 XPV_DISALLOW_MIGRATE();
4469 4463 ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
4470 4464
4471 4465 /*
4472 4466 * if an address for pte_ma is passed in, return the MA of the pte
4473 4467 * for this specific address. This address is only valid as long
4474 4468 * as the htable stays locked.
4475 4469 */
4476 4470 if (pte_ma != NULL) {
4477 4471 entry = htable_va2entry((uintptr_t)addr, ht);
4478 4472 base_ma = pa_to_ma(ptob(ht->ht_pfn));
4479 4473 *pte_ma = base_ma + (entry << mmu.pte_size_shift);
4480 4474 }
4481 4475 XPV_ALLOW_MIGRATE();
4482 4476 }
4483 4477
4484 4478 void
4485 4479 hat_release_mapping(hat_t *hat, caddr_t addr)
4486 4480 {
4487 4481 htable_t *ht;
4488 4482
4489 4483 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4490 4484 XPV_DISALLOW_MIGRATE();
4491 4485 ht = htable_lookup(hat, (uintptr_t)addr, 0);
4492 4486 ASSERT(ht != NULL);
4493 4487 ASSERT(ht->ht_busy >= 2);
4494 4488 htable_release(ht);
4495 4489 htable_release(ht);
4496 4490 XPV_ALLOW_MIGRATE();
4497 4491 }
4498 4492 #endif /* __xpv */
1628 lines elided