1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 /*
  25  * Copyright (c) 2010, Intel Corporation.
  26  * All rights reserved.
  27  */
  28 /*
  29  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  30  * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
  31  */
  32 
  33 /*
  34  * VM - Hardware Address Translation management for i386 and amd64
  35  *
  36  * Implementation of the interfaces described in <common/vm/hat.h>
  37  *
  38  * Nearly all the details of how the hardware is managed should not be
  39  * visible outside this layer except for misc. machine specific functions
  40  * that work in conjunction with this code.
  41  *
  42  * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
  43  */
  44 
  45 #include <sys/machparam.h>
  46 #include <sys/machsystm.h>
  47 #include <sys/mman.h>
  48 #include <sys/types.h>
  49 #include <sys/systm.h>
  50 #include <sys/cpuvar.h>
  51 #include <sys/thread.h>
  52 #include <sys/proc.h>
  53 #include <sys/cpu.h>
  54 #include <sys/kmem.h>
  55 #include <sys/disp.h>
  56 #include <sys/shm.h>
  57 #include <sys/sysmacros.h>
  59 #include <sys/vmem.h>
  60 #include <sys/vmsystm.h>
  61 #include <sys/promif.h>
  62 #include <sys/var.h>
  63 #include <sys/x86_archext.h>
  64 #include <sys/atomic.h>
  65 #include <sys/bitmap.h>
  66 #include <sys/controlregs.h>
  67 #include <sys/bootconf.h>
  68 #include <sys/bootsvcs.h>
  69 #include <sys/bootinfo.h>
  70 #include <sys/archsystm.h>
  71 
  72 #include <vm/seg_kmem.h>
  73 #include <vm/hat_i86.h>
  74 #include <vm/as.h>
  75 #include <vm/seg.h>
  76 #include <vm/page.h>
  77 #include <vm/seg_kp.h>
  78 #include <vm/seg_kpm.h>
  79 #include <vm/vm_dep.h>
  80 #ifdef __xpv
  81 #include <sys/hypervisor.h>
  82 #endif
  83 #include <vm/kboot_mmu.h>
  84 #include <vm/seg_spt.h>
  85 
  86 #include <sys/cmn_err.h>
  87 
  88 /*
  89  * Basic parameters for hat operation.
  90  */
  91 struct hat_mmu_info mmu;
  92 
  93 /*
  94  * The page that is the kernel's top level pagetable.
  95  *
  96  * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
  97  * on this 4K page for its top level page table. The remaining groups of
  98  * 4 entries are used for per processor copies of user VLP pagetables for
  99  * running threads.  See hat_switch() and reload_pae32() for details.
 100  *
 101  * vlp_page[0..3] - level==2 PTEs for kernel HAT
 102  * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
 * vlp_page[8..11] - level==2 PTEs for user thread on cpu 1
 104  * etc...
 105  */
 106 static x86pte_t *vlp_page;
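
/*
 * Illustrative sketch (not part of the build): given the layout above, the
 * group of vlp_page entries that a particular cpu uses can be computed as
 * below.  The helper name is hypothetical; VLP_NUM_PTES is the size of each
 * 4 entry group.
 *
 *	x86pte_t *
 *	cpu_vlp_slot(processorid_t id)
 *	{
 *		return (vlp_page + (id + 1) * VLP_NUM_PTES);
 *	}
 *
 * reload_pae32() computes its destination pointer in exactly this way.
 */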
 107 
 108 /*
 109  * forward declaration of internal utility routines
 110  */
 111 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
 112         x86pte_t new);
 113 
 114 /*
 * The kernel address space exists in all HATs. To implement this, the
 * kernel reserves a fixed number of entries in the topmost level(s) of page
 * tables. The values are set up during startup and then copied to every user
 118  * hat created by hat_alloc(). This means that kernelbase must be:
 119  *
 120  *        4Meg aligned for 32 bit kernels
 *      512Gig aligned for 64 bit (x86_64) kernels
 122  *
 123  * The hat_kernel_range_ts describe what needs to be copied from kernel hat
 124  * to each user hat.
 125  */
 126 typedef struct hat_kernel_range {
 127         level_t         hkr_level;
 128         uintptr_t       hkr_start_va;
 129         uintptr_t       hkr_end_va;     /* zero means to end of memory */
 130 } hat_kernel_range_t;
 131 #define NUM_KERNEL_RANGE 2
 132 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
 133 static int num_kernel_ranges;
 134 
 135 uint_t use_boot_reserve = 1;    /* cleared after early boot process */
 136 uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */
 137 
 138 /*
 139  * enable_1gpg: controls 1g page support for user applications.
 140  * By default, 1g pages are exported to user applications. enable_1gpg can
 141  * be set to 0 to not export.
 142  */
 143 int     enable_1gpg = 1;
 144 
 145 /*
 * AMD Shanghai processors provide better management of 1gb PTEs in their
 * TLBs.  By default, 1g page support will be disabled for pre-Shanghai AMD
 * processors that don't have optimal TLB support for the 1g page size.
 149  * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
 150  * processors.
 151  */
 152 int     chk_optimal_1gtlb = 1;
 153 
 154 
 155 #ifdef DEBUG
 156 uint_t  map1gcnt;
 157 #endif
 158 
 159 
 160 /*
 161  * A cpuset for all cpus. This is used for kernel address cross calls, since
 162  * the kernel addresses apply to all cpus.
 163  */
 164 cpuset_t khat_cpuset;
 165 
 166 /*
 167  * management stuff for hat structures
 168  */
 169 kmutex_t        hat_list_lock;
 170 kcondvar_t      hat_list_cv;
 171 kmem_cache_t    *hat_cache;
 172 kmem_cache_t    *hat_hash_cache;
 173 kmem_cache_t    *vlp_hash_cache;
 174 
 175 /*
 176  * Simple statistics
 177  */
 178 struct hatstats hatstat;
 179 
 180 /*
 181  * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
 182  * correctly.  For such hypervisors we must set PT_USER for kernel
 183  * entries ourselves (normally the emulation would set PT_USER for
 184  * kernel entries and PT_USER|PT_GLOBAL for user entries).  pt_kern is
 185  * thus set appropriately.  Note that dboot/kbm is OK, as only the full
 186  * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
 187  * incorrect.
 188  */
 189 int pt_kern;
 190 
 191 /*
 192  * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 193  */
 194 extern void atomic_orb(uchar_t *addr, uchar_t val);
 195 extern void atomic_andb(uchar_t *addr, uchar_t val);
 196 
 197 #ifndef __xpv
 198 extern pfn_t memseg_get_start(struct memseg *);
 199 #endif
 200 
 201 #define PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
 202 #define PP_ISMOD(pp)            PP_GETRM(pp, P_MOD)
 203 #define PP_ISREF(pp)            PP_GETRM(pp, P_REF)
 204 #define PP_ISRO(pp)             PP_GETRM(pp, P_RO)
 205 
 206 #define PP_SETRM(pp, rm)        atomic_orb(&(pp->p_nrm), rm)
 207 #define PP_SETMOD(pp)           PP_SETRM(pp, P_MOD)
 208 #define PP_SETREF(pp)           PP_SETRM(pp, P_REF)
 209 #define PP_SETRO(pp)            PP_SETRM(pp, P_RO)
 210 
 211 #define PP_CLRRM(pp, rm)        atomic_andb(&(pp->p_nrm), ~(rm))
 212 #define PP_CLRMOD(pp)           PP_CLRRM(pp, P_MOD)
 213 #define PP_CLRREF(pp)           PP_CLRRM(pp, P_REF)
 214 #define PP_CLRRO(pp)            PP_CLRRM(pp, P_RO)
 215 #define PP_CLRALL(pp)           PP_CLRRM(pp, P_MOD | P_REF | P_RO)
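
/*
 * Usage sketch (illustrative only, assuming the mapping list lock is held):
 * a caller propagating hardware ref/mod bits into the page_t might do
 *
 *	if (PTE_GET(pte, PT_MOD))
 *		PP_SETMOD(pp);
 *	if (PP_ISMOD(pp))
 *		(...page must be flushed before being reused...)
 *
 * hati_sync_pte_to_page() below performs this kind of sync, though it goes
 * through hat_page_setattr() rather than these macros directly.
 */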
 216 
 217 /*
 218  * kmem cache constructor for struct hat
 219  */
 220 /*ARGSUSED*/
 221 static int
 222 hati_constructor(void *buf, void *handle, int kmflags)
 223 {
 224         hat_t   *hat = buf;
 225 
 226         mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 227         bzero(hat->hat_pages_mapped,
 228             sizeof (pgcnt_t) * (mmu.max_page_level + 1));
 229         hat->hat_ism_pgcnt = 0;
 230         hat->hat_stats = 0;
 231         hat->hat_flags = 0;
 232         CPUSET_ZERO(hat->hat_cpus);
 233         hat->hat_htable = NULL;
 234         hat->hat_ht_hash = NULL;
 235         return (0);
 236 }
 237 
 238 /*
 * Allocate a hat structure for the given address space.  We also create
 * the top level htable and initialize it to contain the kernel hat entries.
 241  */
 242 hat_t *
 243 hat_alloc(struct as *as)
 244 {
 245         hat_t                   *hat;
 246         htable_t                *ht;    /* top level htable */
 247         uint_t                  use_vlp;
 248         uint_t                  r;
 249         hat_kernel_range_t      *rp;
 250         uintptr_t               va;
 251         uintptr_t               eva;
 252         uint_t                  start;
 253         uint_t                  cnt;
 254         htable_t                *src;
 255 
 256         /*
 257          * Once we start creating user process HATs we can enable
 258          * the htable_steal() code.
 259          */
 260         if (can_steal_post_boot == 0)
 261                 can_steal_post_boot = 1;
 262 
 263         ASSERT(AS_WRITE_HELD(as, &as->a_lock));
 264         hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
 265         hat->hat_as = as;
 266         mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 267         ASSERT(hat->hat_flags == 0);
 268 
 269 #if defined(__xpv)
 270         /*
 271          * No VLP stuff on the hypervisor due to the 64-bit split top level
 272          * page tables.  On 32-bit it's not needed as the hypervisor takes
 273          * care of copying the top level PTEs to a below 4Gig page.
 274          */
 275         use_vlp = 0;
 276 #else   /* __xpv */
	/* 32 bit processes use a VLP style hat when running with PAE */
 278 #if defined(__amd64)
 279         use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
 280 #elif defined(__i386)
 281         use_vlp = mmu.pae_hat;
 282 #endif
 283 #endif  /* __xpv */
 284         if (use_vlp) {
 285                 hat->hat_flags = HAT_VLP;
 286                 bzero(hat->hat_vlp_ptes, VLP_SIZE);
 287         }
 288 
 289         /*
 290          * Allocate the htable hash
 291          */
 292         if ((hat->hat_flags & HAT_VLP)) {
 293                 hat->hat_num_hash = mmu.vlp_hash_cnt;
 294                 hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
 295         } else {
 296                 hat->hat_num_hash = mmu.hash_cnt;
 297                 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
 298         }
 299         bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
 300 
 301         /*
 302          * Initialize Kernel HAT entries at the top of the top level page
 303          * tables for the new hat.
 304          */
 305         hat->hat_htable = NULL;
 306         hat->hat_ht_cached = NULL;
 307         XPV_DISALLOW_MIGRATE();
 308         ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
 309         hat->hat_htable = ht;
 310 
 311 #if defined(__amd64)
 312         if (hat->hat_flags & HAT_VLP)
 313                 goto init_done;
 314 #endif
 315 
 316         for (r = 0; r < num_kernel_ranges; ++r) {
 317                 rp = &kernel_ranges[r];
 318                 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
 319                     va += cnt * LEVEL_SIZE(rp->hkr_level)) {
 320 
 321                         if (rp->hkr_level == TOP_LEVEL(hat))
 322                                 ht = hat->hat_htable;
 323                         else
 324                                 ht = htable_create(hat, va, rp->hkr_level,
 325                                     NULL);
 326 
 327                         start = htable_va2entry(va, ht);
 328                         cnt = HTABLE_NUM_PTES(ht) - start;
 329                         eva = va +
 330                             ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
 331                         if (rp->hkr_end_va != 0 &&
 332                             (eva > rp->hkr_end_va || eva == 0))
 333                                 cnt = htable_va2entry(rp->hkr_end_va, ht) -
 334                                     start;
 335 
 336 #if defined(__i386) && !defined(__xpv)
 337                         if (ht->ht_flags & HTABLE_VLP) {
 338                                 bcopy(&vlp_page[start],
 339                                     &hat->hat_vlp_ptes[start],
 340                                     cnt * sizeof (x86pte_t));
 341                                 continue;
 342                         }
 343 #endif
 344                         src = htable_lookup(kas.a_hat, va, rp->hkr_level);
 345                         ASSERT(src != NULL);
 346                         x86pte_copy(src, ht, start, cnt);
 347                         htable_release(src);
 348                 }
 349         }
 350 
 351 init_done:
 352 
 353 #if defined(__xpv)
 354         /*
 355          * Pin top level page tables after initializing them
 356          */
 357         xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
 358 #if defined(__amd64)
 359         xen_pin(hat->hat_user_ptable, mmu.max_level);
 360 #endif
 361 #endif
 362         XPV_ALLOW_MIGRATE();
 363 
 364         /*
 365          * Put it at the start of the global list of all hats (used by stealing)
 366          *
 367          * kas.a_hat is not in the list but is instead used to find the
 368          * first and last items in the list.
 369          *
 370          * - kas.a_hat->hat_next points to the start of the user hats.
 371          *   The list ends where hat->hat_next == NULL
 372          *
 373          * - kas.a_hat->hat_prev points to the last of the user hats.
 374          *   The list begins where hat->hat_prev == NULL
 375          */
 376         mutex_enter(&hat_list_lock);
 377         hat->hat_prev = NULL;
 378         hat->hat_next = kas.a_hat->hat_next;
 379         if (hat->hat_next)
 380                 hat->hat_next->hat_prev = hat;
 381         else
 382                 kas.a_hat->hat_prev = hat;
 383         kas.a_hat->hat_next = hat;
 384         mutex_exit(&hat_list_lock);
 385 
 386         return (hat);
 387 }
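
/*
 * Illustrative sketch (hypothetical code, not in the build): the list built
 * above is walked under hat_list_lock, e.g.
 *
 *	hat_t *h;
 *
 *	mutex_enter(&hat_list_lock);
 *	for (h = kas.a_hat->hat_next; h != NULL; h = h->hat_next) {
 *		(...examine h, skipping HAT_FREEING/HAT_VICTIM hats...)
 *	}
 *	mutex_exit(&hat_list_lock);
 *
 * htable_steal() walks this list in a similar fashion.
 */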
 388 
 389 /*
 * The process has finished executing, but its address space has not been
 * cleaned up yet.
 391  */
 392 /*ARGSUSED*/
 393 void
 394 hat_free_start(hat_t *hat)
 395 {
 396         ASSERT(AS_WRITE_HELD(hat->hat_as, &hat->hat_as->a_lock));
 397 
 398         /*
 399          * If the hat is currently a stealing victim, wait for the stealing
 400          * to finish.  Once we mark it as HAT_FREEING, htable_steal()
 401          * won't look at its pagetables anymore.
 402          */
 403         mutex_enter(&hat_list_lock);
 404         while (hat->hat_flags & HAT_VICTIM)
 405                 cv_wait(&hat_list_cv, &hat_list_lock);
 406         hat->hat_flags |= HAT_FREEING;
 407         mutex_exit(&hat_list_lock);
 408 }
 409 
 410 /*
 411  * An address space is being destroyed, so we destroy the associated hat.
 412  */
 413 void
 414 hat_free_end(hat_t *hat)
 415 {
 416         kmem_cache_t *cache;
 417 
 418         ASSERT(hat->hat_flags & HAT_FREEING);
 419 
 420         /*
 421          * must not be running on the given hat
 422          */
 423         ASSERT(CPU->cpu_current_hat != hat);
 424 
 425         /*
 426          * Remove it from the list of HATs
 427          */
 428         mutex_enter(&hat_list_lock);
 429         if (hat->hat_prev)
 430                 hat->hat_prev->hat_next = hat->hat_next;
 431         else
 432                 kas.a_hat->hat_next = hat->hat_next;
 433         if (hat->hat_next)
 434                 hat->hat_next->hat_prev = hat->hat_prev;
 435         else
 436                 kas.a_hat->hat_prev = hat->hat_prev;
 437         mutex_exit(&hat_list_lock);
 438         hat->hat_next = hat->hat_prev = NULL;
 439 
 440 #if defined(__xpv)
 441         /*
 442          * On the hypervisor, unpin top level page table(s)
 443          */
 444         xen_unpin(hat->hat_htable->ht_pfn);
 445 #if defined(__amd64)
 446         xen_unpin(hat->hat_user_ptable);
 447 #endif
 448 #endif
 449 
 450         /*
 451          * Make a pass through the htables freeing them all up.
 452          */
 453         htable_purge_hat(hat);
 454 
 455         /*
 456          * Decide which kmem cache the hash table came from, then free it.
 457          */
 458         if (hat->hat_flags & HAT_VLP)
 459                 cache = vlp_hash_cache;
 460         else
 461                 cache = hat_hash_cache;
 462         kmem_cache_free(cache, hat->hat_ht_hash);
 463         hat->hat_ht_hash = NULL;
 464 
 465         hat->hat_flags = 0;
 466         kmem_cache_free(hat_cache, hat);
 467 }
 468 
 469 /*
 * Round kernelbase down to a supported value to use for _userlimit.
 *
 * userlimit must be aligned down to an entry in the top level htable.
 * The one exception is for 32 bit HATs running PAE.
 474  */
 475 uintptr_t
 476 hat_kernelbase(uintptr_t va)
 477 {
 478 #if defined(__i386)
 479         va &= LEVEL_MASK(1);
 480 #endif
 481         if (IN_VA_HOLE(va))
 482                 panic("_userlimit %p will fall in VA hole\n", (void *)va);
 483         return (va);
 484 }
 485 
/*
 * Determine the largest mapping size (pagetable level) to allow, based on
 * boot-time large page support, CPU features/errata and memnode boundaries.
 */
 489 static void
 490 set_max_page_level()
 491 {
 492         level_t lvl;
 493 
 494         if (!kbm_largepage_support) {
 495                 lvl = 0;
 496         } else {
 497                 if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
 498                         lvl = 2;
 499                         if (chk_optimal_1gtlb &&
 500                             cpuid_opteron_erratum(CPU, 6671130)) {
 501                                 lvl = 1;
 502                         }
 503                         if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
 504                             LEVEL_SHIFT(0))) {
 505                                 lvl = 1;
 506                         }
 507                 } else {
 508                         lvl = 1;
 509                 }
 510         }
 511         mmu.max_page_level = lvl;
 512 
 513         if ((lvl == 2) && (enable_1gpg == 0))
 514                 mmu.umax_page_level = 1;
 515         else
 516                 mmu.umax_page_level = lvl;
 517 }
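
/*
 * For example (illustrative): on a CPU with 1GB page support, no erratum
 * 6671130 and no memnode boundary falling inside a 1GB page, the above
 * yields mmu.max_page_level == 2 (1GB pages); if enable_1gpg was cleared,
 * user mappings are still capped at mmu.umax_page_level == 1 (2MB pages).
 */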
 518 
 519 /*
 520  * Initialize hat data structures based on processor MMU information.
 521  */
 522 void
 523 mmu_init(void)
 524 {
 525         uint_t max_htables;
 526         uint_t pa_bits;
 527         uint_t va_bits;
 528         int i;
 529 
 530         /*
	 * If the CPU has enabled the page table global bit, use it for kernel
	 * mappings.  This is bit 7 in CR4 (PGE - Page Global Enable).
 533          */
 534         if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
 535             (getcr4() & CR4_PGE) != 0)
 536                 mmu.pt_global = PT_GLOBAL;
 537 
 538         /*
 539          * Detect NX and PAE usage.
 540          */
 541         mmu.pae_hat = kbm_pae_support;
 542         if (kbm_nx_support)
 543                 mmu.pt_nx = PT_NX;
 544         else
 545                 mmu.pt_nx = 0;
 546 
 547         /*
 548          * Use CPU info to set various MMU parameters
 549          */
 550         cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
 551 
 552         if (va_bits < sizeof (void *) * NBBY) {
 553                 mmu.hole_start = (1ul << (va_bits - 1));
 554                 mmu.hole_end = 0ul - mmu.hole_start - 1;
 555         } else {
 556                 mmu.hole_end = 0;
 557                 mmu.hole_start = mmu.hole_end - 1;
 558         }
 559 #if defined(OPTERON_ERRATUM_121)
 560         /*
 561          * If erratum 121 has already been detected at this time, hole_start
 562          * contains the value to be subtracted from mmu.hole_start.
 563          */
 564         ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
 565         hole_start = mmu.hole_start - hole_start;
 566 #else
 567         hole_start = mmu.hole_start;
 568 #endif
 569         hole_end = mmu.hole_end;
 570 
 571         mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
 572         if (mmu.pae_hat == 0 && pa_bits > 32)
 573                 mmu.highest_pfn = PFN_4G - 1;
 574 
 575         if (mmu.pae_hat) {
 576                 mmu.pte_size = 8;       /* 8 byte PTEs */
 577                 mmu.pte_size_shift = 3;
 578         } else {
 579                 mmu.pte_size = 4;       /* 4 byte PTEs */
 580                 mmu.pte_size_shift = 2;
 581         }
 582 
 583         if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
 584                 panic("Processor does not support PAE");
 585 
 586         if (!is_x86_feature(x86_featureset, X86FSET_CX8))
 587                 panic("Processor does not support cmpxchg8b instruction");
 588 
 589 #if defined(__amd64)
 590 
 591         mmu.num_level = 4;
 592         mmu.max_level = 3;
 593         mmu.ptes_per_table = 512;
 594         mmu.top_level_count = 512;
 595 
 596         mmu.level_shift[0] = 12;
 597         mmu.level_shift[1] = 21;
 598         mmu.level_shift[2] = 30;
 599         mmu.level_shift[3] = 39;
 600 
 601 #elif defined(__i386)
 602 
 603         if (mmu.pae_hat) {
 604                 mmu.num_level = 3;
 605                 mmu.max_level = 2;
 606                 mmu.ptes_per_table = 512;
 607                 mmu.top_level_count = 4;
 608 
 609                 mmu.level_shift[0] = 12;
 610                 mmu.level_shift[1] = 21;
 611                 mmu.level_shift[2] = 30;
 612 
 613         } else {
 614                 mmu.num_level = 2;
 615                 mmu.max_level = 1;
 616                 mmu.ptes_per_table = 1024;
 617                 mmu.top_level_count = 1024;
 618 
 619                 mmu.level_shift[0] = 12;
 620                 mmu.level_shift[1] = 22;
 621         }
 622 
 623 #endif  /* __i386 */
 624 
 625         for (i = 0; i < mmu.num_level; ++i) {
 626                 mmu.level_size[i] = 1UL << mmu.level_shift[i];
 627                 mmu.level_offset[i] = mmu.level_size[i] - 1;
 628                 mmu.level_mask[i] = ~mmu.level_offset[i];
 629         }
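
	/*
	 * Worked example (amd64, illustrative): level_shift[1] == 21, so
	 * level_size[1] == 2MB, level_offset[1] == 0x1fffff and
	 * level_mask[1] == ~0x1fffff; the LEVEL_SIZE()/LEVEL_MASK() macros
	 * used throughout this file read these arrays.
	 */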
 630 
 631         set_max_page_level();
 632 
 633         mmu_page_sizes = mmu.max_page_level + 1;
 634         mmu_exported_page_sizes = mmu.umax_page_level + 1;
 635 
 636         /* restrict legacy applications from using pagesizes 1g and above */
 637         mmu_legacy_page_sizes =
 638             (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
 639 
 640 
 641         for (i = 0; i <= mmu.max_page_level; ++i) {
 642                 mmu.pte_bits[i] = PT_VALID | pt_kern;
 643                 if (i > 0)
 644                         mmu.pte_bits[i] |= PT_PAGESIZE;
 645         }
 646 
 647         /*
	 * NOTE: legacy 32 bit PAE mode only has the PT_VALID bit at top level.
 649          */
 650         for (i = 1; i < mmu.num_level; ++i)
 651                 mmu.ptp_bits[i] = PT_PTPBITS;
 652 
 653 #if defined(__i386)
 654         mmu.ptp_bits[2] = PT_VALID;
 655 #endif
 656 
 657         /*
 658          * Compute how many hash table entries to have per process for htables.
 659          * We start with 1 page's worth of entries.
 660          *
	 * If physical memory is small, reduce the number needed to cover it.
 662          */
 663         max_htables = physmax / mmu.ptes_per_table;
 664         mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
 665         while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
 666                 mmu.hash_cnt >>= 1;
 667         mmu.vlp_hash_cnt = mmu.hash_cnt;
 668 
 669 #if defined(__amd64)
 670         /*
 671          * If running in 64 bits and physical memory is large,
 672          * increase the size of the cache to cover all of memory for
 673          * a 64 bit process.
 674          */
 675 #define HASH_MAX_LENGTH 4
 676         while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
 677                 mmu.hash_cnt <<= 1;
 678 #endif
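
	/*
	 * Worked example (illustrative, 8 byte htable pointers): one 4K page
	 * gives an initial hash_cnt of 4096 / 8 == 512 buckets.  With 16GB
	 * of physical memory, max_htables == (16GB / 4K) / 512 == 8192, so
	 * the amd64 loop above grows hash_cnt to 2048, keeping the average
	 * hash chain under HASH_MAX_LENGTH entries.
	 */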
 679 }
 680 
 681 
 682 /*
 683  * initialize hat data structures
 684  */
 685 void
 686 hat_init()
 687 {
 688 #if defined(__i386)
 689         /*
 690          * _userlimit must be aligned correctly
 691          */
 692         if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
 693                 prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
 694                     (void *)_userlimit, (void *)LEVEL_SIZE(1));
 695                 halt("hat_init(): Unable to continue");
 696         }
 697 #endif
 698 
 699         cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
 700 
 701         /*
 702          * initialize kmem caches
 703          */
 704         htable_init();
 705         hment_init();
 706 
 707         hat_cache = kmem_cache_create("hat_t",
 708             sizeof (hat_t), 0, hati_constructor, NULL, NULL,
 709             NULL, 0, 0);
 710 
 711         hat_hash_cache = kmem_cache_create("HatHash",
 712             mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
 713             NULL, 0, 0);
 714 
 715         /*
	 * VLP hats can use a smaller hash table size on large memory machines
 717          */
 718         if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
 719                 vlp_hash_cache = hat_hash_cache;
 720         } else {
 721                 vlp_hash_cache = kmem_cache_create("HatVlpHash",
 722                     mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
 723                     NULL, 0, 0);
 724         }
 725 
 726         /*
 727          * Set up the kernel's hat
 728          */
 729         AS_LOCK_ENTER(&kas, &kas.a_lock, RW_WRITER);
 730         kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
 731         mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 732         kas.a_hat->hat_as = &kas;
 733         kas.a_hat->hat_flags = 0;
 734         AS_LOCK_EXIT(&kas, &kas.a_lock);
 735 
 736         CPUSET_ZERO(khat_cpuset);
 737         CPUSET_ADD(khat_cpuset, CPU->cpu_id);
 738 
 739         /*
	 * The kernel hat's next pointer serves as the head of the hat list.
 741          * The kernel hat's prev pointer tracks the last hat on the list for
 742          * htable_steal() to use.
 743          */
 744         kas.a_hat->hat_next = NULL;
 745         kas.a_hat->hat_prev = NULL;
 746 
 747         /*
 748          * Allocate an htable hash bucket for the kernel
 749          * XX64 - tune for 64 bit procs
 750          */
 751         kas.a_hat->hat_num_hash = mmu.hash_cnt;
 752         kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
 753         bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
 754 
 755         /*
 756          * zero out the top level and cached htable pointers
 757          */
 758         kas.a_hat->hat_ht_cached = NULL;
 759         kas.a_hat->hat_htable = NULL;
 760 
 761         /*
 762          * Pre-allocate hrm_hashtab before enabling the collection of
 763          * refmod statistics.  Allocating on the fly would mean us
 764          * running the risk of suffering recursive mutex enters or
 765          * deadlocks.
 766          */
 767         hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
 768             KM_SLEEP);
 769 }
 770 
 771 /*
 772  * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 773  *
 774  * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 775  * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 776  * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 777  */
 778 /*ARGSUSED*/
 779 static void
 780 hat_vlp_setup(struct cpu *cpu)
 781 {
 782 #if defined(__amd64) && !defined(__xpv)
 783         struct hat_cpu_info *hci = cpu->cpu_hat_info;
 784         pfn_t pfn;
 785 
 786         /*
	 * allocate the level==2 page table for the bottommost
	 * 512Gig of address space (this is where 32 bit apps live)
 789          */
 790         ASSERT(hci != NULL);
 791         hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
 792 
 793         /*
 794          * Allocate a top level pagetable and copy the kernel's
 795          * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
 796          */
 797         hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
 798         hci->hci_vlp_pfn =
 799             hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
 800         ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
 801         bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);
 802 
 803         pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
 804         ASSERT(pfn != PFN_INVALID);
 805         hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
 806 #endif /* __amd64 && !__xpv */
 807 }
 808 
 809 /*ARGSUSED*/
 810 static void
 811 hat_vlp_teardown(cpu_t *cpu)
 812 {
 813 #if defined(__amd64) && !defined(__xpv)
 814         struct hat_cpu_info *hci;
 815 
 816         if ((hci = cpu->cpu_hat_info) == NULL)
 817                 return;
 818         if (hci->hci_vlp_l2ptes)
 819                 kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
 820         if (hci->hci_vlp_l3ptes)
 821                 kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
 822 #endif
 823 }
 824 
 825 #define NEXT_HKR(r, l, s, e) {                  \
 826         kernel_ranges[r].hkr_level = l;         \
 827         kernel_ranges[r].hkr_start_va = s;      \
 828         kernel_ranges[r].hkr_end_va = e;        \
 829         ++r;                                    \
 830 }
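
/*
 * For example (illustrative): on an amd64 kernel, hat_init_finish() below
 * issues NEXT_HKR(r, 3, kernelbase, 0), recording a single level 3 range
 * from kernelbase to the end of the address space; hat_alloc() then copies
 * the kernel's top level PTEs for that range into every new user hat.
 */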
 831 
 832 /*
 833  * Finish filling in the kernel hat.
 * Pre-fill all top level kernel page table entries for the kernel's
 * part of the address range.  From this point on we can't use any new
 * kernel large pages if they need PTEs at max_level.
 *
 * Also create the kmap mappings.
 839  */
 840 void
 841 hat_init_finish(void)
 842 {
 843         size_t          size;
 844         uint_t          r = 0;
 845         uintptr_t       va;
 846         hat_kernel_range_t *rp;
 847 
 848 
 849         /*
 850          * We are now effectively running on the kernel hat.
 851          * Clearing use_boot_reserve shuts off using the pre-allocated boot
 852          * reserve for all HAT allocations.  From here on, the reserves are
 853          * only used when avoiding recursion in kmem_alloc().
 854          */
 855         use_boot_reserve = 0;
 856         htable_adjust_reserve();
 857 
 858         /*
 859          * User HATs are initialized with copies of all kernel mappings in
 860          * higher level page tables. Ensure that those entries exist.
 861          */
 862 #if defined(__amd64)
 863 
 864         NEXT_HKR(r, 3, kernelbase, 0);
 865 #if defined(__xpv)
 866         NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
 867 #endif
 868 
 869 #elif defined(__i386)
 870 
 871 #if !defined(__xpv)
 872         if (mmu.pae_hat) {
 873                 va = kernelbase;
 874                 if ((va & LEVEL_MASK(2)) != va) {
 875                         va = P2ROUNDUP(va, LEVEL_SIZE(2));
 876                         NEXT_HKR(r, 1, kernelbase, va);
 877                 }
 878                 if (va != 0)
 879                         NEXT_HKR(r, 2, va, 0);
 880         } else
 881 #endif /* __xpv */
 882                 NEXT_HKR(r, 1, kernelbase, 0);
 883 
 884 #endif /* __i386 */
 885 
 886         num_kernel_ranges = r;
 887 
 888         /*
 889          * Create all the kernel pagetables that will have entries
 890          * shared to user HATs.
 891          */
 892         for (r = 0; r < num_kernel_ranges; ++r) {
 893                 rp = &kernel_ranges[r];
 894                 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
 895                     va += LEVEL_SIZE(rp->hkr_level)) {
 896                         htable_t *ht;
 897 
 898                         if (IN_HYPERVISOR_VA(va))
 899                                 continue;
 900 
 901                         /* can/must skip if a page mapping already exists */
 902                         if (rp->hkr_level <= mmu.max_page_level &&
 903                             (ht = htable_getpage(kas.a_hat, va, NULL)) !=
 904                             NULL) {
 905                                 htable_release(ht);
 906                                 continue;
 907                         }
 908 
 909                         (void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
 910                             NULL);
 911                 }
 912         }
 913 
 914         /*
 915          * 32 bit PAE metal kernels use only 4 of the 512 entries in the
 916          * page holding the top level pagetable. We use the remainder for
 917          * the "per CPU" page tables for VLP processes.
 918          * Map the top level kernel pagetable into the kernel to make
	 * it easy to access these tables with bcopy.
 920          */
 921         if (mmu.pae_hat) {
 922                 vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
 923                 hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
 924                     kas.a_hat->hat_htable->ht_pfn,
 925 #if !defined(__xpv)
 926                     PROT_WRITE |
 927 #endif
 928                     PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
 929                     HAT_LOAD | HAT_LOAD_NOCONSIST);
 930         }
 931         hat_vlp_setup(CPU);
 932 
 933         /*
	 * Create kmap (cached mappings of kernel PTEs):
	 * for 32 bit we map from segmap_start .. ekernelheap;
	 * for 64 bit we map from segmap_start .. segmap_start + segmapsize.
 937          */
 938 #if defined(__i386)
 939         size = (uintptr_t)ekernelheap - segmap_start;
 940 #elif defined(__amd64)
 941         size = segmapsize;
 942 #endif
 943         hat_kmap_init((uintptr_t)segmap_start, size);
 944 }
 945 
 946 /*
 * In 32 bit PAE mode, PTEs are 64 bits, but ordinary atomic memory references
 * are 32 bits, so for safety we must use atomic_cas_64() to install these.
 949  */
 950 #ifdef __i386
 951 static void
 952 reload_pae32(hat_t *hat, cpu_t *cpu)
 953 {
 954         x86pte_t *src;
 955         x86pte_t *dest;
 956         x86pte_t pte;
 957         int i;
 958 
 959         /*
 960          * Load the 4 entries of the level 2 page table into this
 961          * cpu's range of the vlp_page and point cr3 at them.
 962          */
 963         ASSERT(mmu.pae_hat);
 964         src = hat->hat_vlp_ptes;
 965         dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
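	/*
	 * A 64-bit PTE store is not atomic on 32 bit hardware (see the
	 * block comment above), so install each entry with atomic_cas_64().
	 * The inner loop ends once dest[i] already holds src[i] or the
	 * swap has completed.
	 */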
 966         for (i = 0; i < VLP_NUM_PTES; ++i) {
 967                 for (;;) {
 968                         pte = dest[i];
 969                         if (pte == src[i])
 970                                 break;
 971                         if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
 972                                 break;
 973                 }
 974         }
 975 }
 976 #endif
 977 
 978 /*
 979  * Switch to a new active hat, maintaining bit masks to track active CPUs.
 980  *
 * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value; on metal it
 * remains a 32-bit value.
 983  */
 984 void
 985 hat_switch(hat_t *hat)
 986 {
 987         uint64_t        newcr3;
 988         cpu_t           *cpu = CPU;
 989         hat_t           *old = cpu->cpu_current_hat;
 990 
 991         /*
 992          * set up this information first, so we don't miss any cross calls
 993          */
 994         if (old != NULL) {
 995                 if (old == hat)
 996                         return;
 997                 if (old != kas.a_hat)
 998                         CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
 999         }
1000 
1001         /*
1002          * Add this CPU to the active set for this HAT.
1003          */
1004         if (hat != kas.a_hat) {
1005                 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
1006         }
1007         cpu->cpu_current_hat = hat;
1008 
1009         /*
1010          * now go ahead and load cr3
1011          */
1012         if (hat->hat_flags & HAT_VLP) {
1013 #if defined(__amd64)
1014                 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1015 
1016                 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1017                 newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
1018 #elif defined(__i386)
1019                 reload_pae32(hat, cpu);
1020                 newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
1021                     (cpu->cpu_id + 1) * VLP_SIZE;
1022 #endif
1023         } else {
1024                 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
1025         }
1026 #ifdef __xpv
1027         {
1028                 struct mmuext_op t[2];
1029                 uint_t retcnt;
1030                 uint_t opcnt = 1;
1031 
1032                 t[0].cmd = MMUEXT_NEW_BASEPTR;
1033                 t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1034 #if defined(__amd64)
1035                 /*
1036                  * There's an interesting problem here, as to what to
1037                  * actually specify when switching to the kernel hat.
1038                  * For now we'll reuse the kernel hat again.
1039                  */
1040                 t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
1041                 if (hat == kas.a_hat)
1042                         t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1043                 else
1044                         t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
1045                 ++opcnt;
1046 #endif  /* __amd64 */
1047                 if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
			panic("HYPERVISOR_mmuext_op() failed");
1049                 ASSERT(retcnt == opcnt);
1050 
1051         }
1052 #else
1053         setcr3(newcr3);
1054 #endif
1055         ASSERT(cpu == CPU);
1056 }
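
/*
 * For example (illustrative): on a 32 bit PAE metal kernel, cpu 2 running a
 * VLP process gets newcr3 == MAKECR3(kernel top level pfn) + 3 * VLP_SIZE,
 * so %cr3 points at the physical memory also mapped at vlp_page[12..15] --
 * that cpu's private copy of the process's four top level PTEs (see the
 * layout comment above vlp_page).
 */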
1057 
1058 /*
1059  * Utility to return a valid x86pte_t from protections, pfn, and level number
1060  */
1061 static x86pte_t
1062 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
1063 {
1064         x86pte_t        pte;
1065         uint_t          cache_attr = attr & HAT_ORDER_MASK;
1066 
1067         pte = MAKEPTE(pfn, level);
1068 
1069         if (attr & PROT_WRITE)
1070                 PTE_SET(pte, PT_WRITABLE);
1071 
1072         if (attr & PROT_USER)
1073                 PTE_SET(pte, PT_USER);
1074 
1075         if (!(attr & PROT_EXEC))
1076                 PTE_SET(pte, mmu.pt_nx);
1077 
1078         /*
	 * Set the software bits used to track ref/mod syncs and hments.
1080          * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
1081          */
1082         if (flags & HAT_LOAD_NOCONSIST)
1083                 PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
1084         else if (attr & HAT_NOSYNC)
1085                 PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
1086 
1087         /*
	 * Set the caching attributes in the PTE. The combinations of
	 * attributes are poorly defined, so we pay attention to them in
	 * the given order.
	 *
	 * The test for HAT_STRICTORDER is different because it's defined
	 * as "0" - which was a stupid thing to do, but it is too late to change!
1094          */
1095         if (cache_attr == HAT_STRICTORDER) {
1096                 PTE_SET(pte, PT_NOCACHE);
1097         /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
1098         } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
1099                 /* nothing to set */;
1100         } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
1101                 PTE_SET(pte, PT_NOCACHE);
1102                 if (is_x86_feature(x86_featureset, X86FSET_PAT))
1103                         PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
1104                 else
1105                         PTE_SET(pte, PT_WRITETHRU);
1106         } else {
1107                 panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
1108         }
1109 
1110         return (pte);
1111 }
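
/*
 * Illustrative example (not from the build): a writable, non-executable,
 * normally cached 4K user mapping could be constructed as
 *
 *	pte = hati_mkpte(pfn, PROT_READ | PROT_WRITE | PROT_USER |
 *	    HAT_STORECACHING_OK, 0, HAT_LOAD);
 *
 * which sets PT_WRITABLE and PT_USER, sets mmu.pt_nx (when NX is supported)
 * since PROT_EXEC is absent, and leaves the caching bits alone via the
 * HAT_STORECACHING_OK branch above.
 */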
1112 
1113 /*
1114  * Duplicate address translations of the parent to the child.
1115  * This function really isn't used anymore.
1116  */
1117 /*ARGSUSED*/
1118 int
1119 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1120 {
1121         ASSERT((uintptr_t)addr < kernelbase);
1122         ASSERT(new != kas.a_hat);
1123         ASSERT(old != kas.a_hat);
1124         return (0);
1125 }
1126 
1127 /*
1128  * Allocate any hat resources required for a process being swapped in.
1129  */
1130 /*ARGSUSED*/
1131 void
1132 hat_swapin(hat_t *hat)
1133 {
1134         /* do nothing - we let everything fault back in */
1135 }
1136 
1137 /*
1138  * Unload all translations associated with an address space of a process
1139  * that is being swapped out.
1140  */
1141 void
1142 hat_swapout(hat_t *hat)
1143 {
1144         uintptr_t       vaddr = (uintptr_t)0;
1145         uintptr_t       eaddr = _userlimit;
1146         htable_t        *ht = NULL;
1147         level_t         l;
1148 
1149         XPV_DISALLOW_MIGRATE();
1150         /*
1151          * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
1152          * seg_spt and shared pagetables can't be swapped out.
1153          * Take a look at segspt_shmswapout() - it's a big no-op.
1154          *
	 * Instead we'll walk through the entire address space and unload
	 * any mappings which we are sure are not shared and not locked.
1157          */
1158         ASSERT(IS_PAGEALIGNED(vaddr));
1159         ASSERT(IS_PAGEALIGNED(eaddr));
1160         ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1161         if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1162                 eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1163 
1164         while (vaddr < eaddr) {
1165                 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1166                 if (ht == NULL)
1167                         break;
1168 
1169                 ASSERT(!IN_VA_HOLE(vaddr));
1170 
1171                 /*
1172                  * If the page table is shared skip its entire range.
1173                  */
1174                 l = ht->ht_level;
1175                 if (ht->ht_flags & HTABLE_SHARED_PFN) {
1176                         vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
1177                         htable_release(ht);
1178                         ht = NULL;
1179                         continue;
1180                 }
1181 
1182                 /*
1183                  * If the page table has no locked entries, unload this one.
1184                  */
1185                 if (ht->ht_lock_cnt == 0)
1186                         hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1187                             HAT_UNLOAD_UNMAP);
1188 
1189                 /*
1190                  * If we have a level 0 page table with locked entries,
1191                  * skip the entire page table, otherwise skip just one entry.
1192                  */
1193                 if (ht->ht_lock_cnt > 0 && l == 0)
1194                         vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1195                 else
1196                         vaddr += LEVEL_SIZE(l);
1197         }
1198         if (ht)
1199                 htable_release(ht);
1200 
1201         /*
1202          * We're in swapout because the system is low on memory, so
1203          * go back and flush all the htables off the cached list.
1204          */
1205         htable_purge_hat(hat);
1206         XPV_ALLOW_MIGRATE();
1207 }
1208 
1209 /*
 * Returns the number of bytes that have valid mappings in the hat.
1211  */
1212 size_t
1213 hat_get_mapped_size(hat_t *hat)
1214 {
1215         size_t total = 0;
1216         int l;
1217 
1218         for (l = 0; l <= mmu.max_page_level; l++)
1219                 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1220         total += hat->hat_ism_pgcnt;
1221 
1222         return (total);
1223 }
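
/*
 * Worked example (illustrative figures): a hat with 100 4K pages and two
 * 2MB pages mapped reports (100 << 12) + (2 << 21) == 409600 + 4194304
 * bytes, plus the ISM contribution tracked in hat_ism_pgcnt.
 */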
1224 
1225 /*
1226  * enable/disable collection of stats for hat.
1227  */
1228 int
1229 hat_stats_enable(hat_t *hat)
1230 {
1231         atomic_inc_32(&hat->hat_stats);
1232         return (1);
1233 }
1234 
1235 void
1236 hat_stats_disable(hat_t *hat)
1237 {
1238         atomic_dec_32(&hat->hat_stats);
1239 }
1240 
1241 /*
 * Utility to sync the ref/mod bits from a page table entry to the page_t.
 * We must be holding the mapping list lock when this is called.
1244  */
1245 static void
1246 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1247 {
1248         uint_t  rm = 0;
1249         pgcnt_t pgcnt;
1250 
1251         if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1252                 return;
1253 
1254         if (PTE_GET(pte, PT_REF))
1255                 rm |= P_REF;
1256 
1257         if (PTE_GET(pte, PT_MOD))
1258                 rm |= P_MOD;
1259 
1260         if (rm == 0)
1261                 return;
1262 
1263         /*
1264          * sync to all constituent pages of a large page
1265          */
1266         ASSERT(x86_hm_held(pp));
1267         pgcnt = page_get_pagecnt(level);
1268         ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1269         for (; pgcnt > 0; --pgcnt) {
1270                 /*
		 * hat_page_demote() can't decrease pszc below this
		 * mapping size, since this large mapping still existed
		 * after we took the mlist lock.
1275                  */
1276                 ASSERT(pp->p_szc >= level);
1277                 hat_page_setattr(pp, rm);
1278                 ++pp;
1279         }
1280 }
1281 
1282 /*
 * This is the set of PTE bits for PFN, permissions and caching
 * that are allowed to change on a HAT_LOAD_REMAP.
1285  */
1286 #define PT_REMAP_BITS                                                   \
1287         (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |                \
1288         PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
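
/*
 * For example (illustrative): a remap that only adds PT_WRITABLE, or one
 * that changes the pfn (PT_PADDR) under HAT_LOAD_REMAP|HAT_LOAD_NOCONSIST,
 * passes the PT_REMAP_BITS check in hati_pte_map(); a remap that tried to
 * flip PT_USER would panic there.
 */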
1289 
1290 #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
1291 /*
1292  * Do the low-level work to get a mapping entered into a HAT's pagetables
1293  * and in the mapping list of the associated page_t.
1294  */
1295 static int
1296 hati_pte_map(
1297         htable_t        *ht,
1298         uint_t          entry,
1299         page_t          *pp,
1300         x86pte_t        pte,
1301         int             flags,
1302         void            *pte_ptr)
1303 {
1304         hat_t           *hat = ht->ht_hat;
1305         x86pte_t        old_pte;
1306         level_t         l = ht->ht_level;
	hment_t		*hm = NULL;
1308         uint_t          is_consist;
1309         uint_t          is_locked;
1310         int             rv = 0;
1311 
1312         /*
1313          * Is this a consistent (ie. need mapping list lock) mapping?
1314          */
1315         is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1316 
1317         /*
1318          * Track locked mapping count in the htable.  Do this first,
1319          * as we track locking even if there already is a mapping present.
1320          */
1321         is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
1322         if (is_locked)
1323                 HTABLE_LOCK_INC(ht);
1324 
1325         /*
1326          * Acquire the page's mapping list lock and get an hment to use.
1327          * Note that hment_prepare() might return NULL.
1328          */
1329         if (is_consist) {
1330                 x86_hm_enter(pp);
1331                 hm = hment_prepare(ht, entry, pp);
1332         }
1333 
1334         /*
1335          * Set the new pte, retrieving the old one at the same time.
1336          */
1337         old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1338 
1339         /*
1340          * Did we get a large page / page table collision?
1341          */
1342         if (old_pte == LPAGE_ERROR) {
1343                 if (is_locked)
1344                         HTABLE_LOCK_DEC(ht);
1345                 rv = -1;
1346                 goto done;
1347         }
1348 
1349         /*
1350          * If the mapping didn't change there is nothing more to do.
1351          */
1352         if (PTE_EQUIV(pte, old_pte))
1353                 goto done;
1354 
1355         /*
1356          * Install a new mapping in the page's mapping list
1357          */
1358         if (!PTE_ISVALID(old_pte)) {
1359                 if (is_consist) {
1360                         hment_assign(ht, entry, pp, hm);
1361                         x86_hm_exit(pp);
1362                 } else {
1363                         ASSERT(flags & HAT_LOAD_NOCONSIST);
1364                 }
1365 #if defined(__amd64)
1366                 if (ht->ht_flags & HTABLE_VLP) {
1367                         cpu_t *cpu = CPU;
1368                         x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1369                         VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1370                 }
1371 #endif
1372                 HTABLE_INC(ht->ht_valid_cnt);
1373                 PGCNT_INC(hat, l);
1374                 return (rv);
1375         }
1376 
1377         /*
1378          * Remap's are more complicated:
1379          *  - HAT_LOAD_REMAP must be specified if changing the pfn.
1380          *    We also require that NOCONSIST be specified.
1381          *  - Otherwise only permission or caching bits may change.
1382          */
1383         if (!PTE_ISPAGE(old_pte, l))
1384                 panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1385 
1386         if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1387                 REMAPASSERT(flags & HAT_LOAD_REMAP);
1388                 REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1389                 REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1390                 REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1391                     pf_is_memory(PTE2PFN(pte, l)));
1392                 REMAPASSERT(!is_consist);
1393         }
1394 
1395         /*
	 * We only let remaps change certain bits in the PTE.
1397          */
1398         if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1399                 panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1400                     old_pte, pte);
1401 
1402         /*
1403          * We don't create any mapping list entries on a remap, so release
1404          * any allocated hment after we drop the mapping list lock.
1405          */
1406 done:
1407         if (is_consist) {
1408                 x86_hm_exit(pp);
1409                 if (hm != NULL)
1410                         hment_free(hm);
1411         }
1412         return (rv);
1413 }
1414 
1415 /*
1416  * Internal routine to load a single page table entry. This only fails if
1417  * we attempt to overwrite a page table link with a large page.
1418  */
1419 static int
1420 hati_load_common(
1421         hat_t           *hat,
1422         uintptr_t       va,
1423         page_t          *pp,
1424         uint_t          attr,
1425         uint_t          flags,
1426         level_t         level,
1427         pfn_t           pfn)
1428 {
1429         htable_t        *ht;
1430         uint_t          entry;
1431         x86pte_t        pte;
1432         int             rv = 0;
1433 
1434         /*
	 * The number 16 is arbitrary and is here to catch a recursion problem
1436          * early before we blow out the kernel stack.
1437          */
1438         ++curthread->t_hatdepth;
1439         ASSERT(curthread->t_hatdepth < 16);
1440 
1441         ASSERT(hat == kas.a_hat ||
1442             AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1443 
1444         if (flags & HAT_LOAD_SHARE)
1445                 hat->hat_flags |= HAT_SHARED;
1446 
1447         /*
1448          * Find the page table that maps this page if it already exists.
1449          */
1450         ht = htable_lookup(hat, va, level);
1451 
1452         /*
1453          * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1454          */
1455         if (pp == NULL)
1456                 flags |= HAT_LOAD_NOCONSIST;
1457 
1458         if (ht == NULL) {
1459                 ht = htable_create(hat, va, level, NULL);
1460                 ASSERT(ht != NULL);
1461         }
1462         entry = htable_va2entry(va, ht);
1463 
1464         /*
1465          * a bunch of paranoid error checking
1466          */
1467         ASSERT(ht->ht_busy > 0);
1468         if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1469                 panic("hati_load_common: bad htable %p, va %p",
1470                     (void *)ht, (void *)va);
1471         ASSERT(ht->ht_level == level);
1472 
1473         /*
1474          * construct the new PTE
1475          */
1476         if (hat == kas.a_hat)
1477                 attr &= ~PROT_USER;
1478         pte = hati_mkpte(pfn, attr, level, flags);
1479         if (hat == kas.a_hat && va >= kernelbase)
1480                 PTE_SET(pte, mmu.pt_global);
1481 
1482         /*
1483          * establish the mapping
1484          */
1485         rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
1486 
1487         /*
1488          * release the htable and any reserves
1489          */
1490         htable_release(ht);
1491         --curthread->t_hatdepth;
1492         return (rv);
1493 }
1494 
1495 /*
1496  * special case of hat_memload to deal with some kernel addrs for performance
1497  */
1498 static void
1499 hat_kmap_load(
1500         caddr_t         addr,
1501         page_t          *pp,
1502         uint_t          attr,
1503         uint_t          flags)
1504 {
1505         uintptr_t       va = (uintptr_t)addr;
1506         x86pte_t        pte;
1507         pfn_t           pfn = page_pptonum(pp);
1508         pgcnt_t         pg_off = mmu_btop(va - mmu.kmap_addr);
1509         htable_t        *ht;
1510         uint_t          entry;
1511         void            *pte_ptr;
1512 
1513         /*
1514          * construct the requested PTE
1515          */
1516         attr &= ~PROT_USER;
1517         attr |= HAT_STORECACHING_OK;
1518         pte = hati_mkpte(pfn, attr, 0, flags);
1519         PTE_SET(pte, mmu.pt_global);
1520 
1521         /*
1522          * Figure out the pte_ptr and htable and use common code to finish up
1523          */
1524         if (mmu.pae_hat)
1525                 pte_ptr = mmu.kmap_ptes + pg_off;
1526         else
1527                 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
1528         ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1529             LEVEL_SHIFT(1)];
1530         entry = htable_va2entry(va, ht);
1531         ++curthread->t_hatdepth;
1532         ASSERT(curthread->t_hatdepth < 16);
1533         (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1534         --curthread->t_hatdepth;
1535 }
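
/*
 * Illustrative example (assumed figures): for va == mmu.kmap_addr plus 5
 * pages, pg_off is 5 and the PTE is written through mmu.kmap_ptes[5]; the
 * htable comes from indexing mmu.kmap_htables by va's level 1 distance from
 * the first kmap htable's base address.
 */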
1536 
1537 /*
1538  * hat_memload() - load a translation to the given page struct
1539  *
1540  * Flags for hat_memload/hat_devload/hat_*attr.
1541  *
1542  *      HAT_LOAD        Default flags to load a translation to the page.
1543  *
1544  *      HAT_LOAD_LOCK   Lock down mapping resources; hat_map(), hat_memload(),
1545  *                      and hat_devload().
1546  *
1547  *      HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1548  *                      sets PT_NOCONSIST
1549  *
 *      HAT_LOAD_SHARE  A flag to hat_memload() to indicate that the h/w
 *                      page tables that map some user pages (not kas) are
 *                      shared by more than one process (eg. ISM).
1553  *
1554  *      HAT_LOAD_REMAP  Reload a valid pte with a different page frame.
1555  *
1556  *      HAT_NO_KALLOC   Do not kmem_alloc while creating the mapping; at this
1557  *                      point, it's setting up mapping to allocate internal
1558  *                      hat layer data structures.  This flag forces hat layer
1559  *                      to tap its reserves in order to prevent infinite
1560  *                      recursion.
1561  *
1562  * The following is a protection attribute (like PROT_READ, etc.)
1563  *
1564  *      HAT_NOSYNC      set PT_NOSYNC - this mapping's ref/mod bits
1565  *                      are never cleared.
1566  *
 * Installing new valid PTEs and creation of the mapping list
1568  * entry are controlled under the same lock. It's derived from the
1569  * page_t being mapped.
1570  */
1571 static uint_t supported_memload_flags =
1572         HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1573         HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
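
/*
 * Illustrative sketch only (not part of the original interface notes): a
 * caller holding an exclusively locked page "pp" that wants a locked,
 * writable, user-accessible translation at page-aligned "addr" might do:
 *
 *	hat_memload(hat, addr, pp, PROT_READ | PROT_WRITE | PROT_USER,
 *	    HAT_LOAD_LOCK);
 *
 * and later drop the lock count again with:
 *
 *	hat_unlock(hat, addr, MMU_PAGESIZE);
 */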
1574 
1575 void
1576 hat_memload(
1577         hat_t           *hat,
1578         caddr_t         addr,
1579         page_t          *pp,
1580         uint_t          attr,
1581         uint_t          flags)
1582 {
1583         uintptr_t       va = (uintptr_t)addr;
1584         level_t         level = 0;
1585         pfn_t           pfn = page_pptonum(pp);
1586 
1587         XPV_DISALLOW_MIGRATE();
1588         ASSERT(IS_PAGEALIGNED(va));
1589         ASSERT(hat == kas.a_hat || va < _userlimit);
1590         ASSERT(hat == kas.a_hat ||
1591             AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1592         ASSERT((flags & supported_memload_flags) == flags);
1593 
1594         ASSERT(!IN_VA_HOLE(va));
1595         ASSERT(!PP_ISFREE(pp));
1596 
1597         /*
1598          * kernel address special case for performance.
1599          */
1600         if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1601                 ASSERT(hat == kas.a_hat);
1602                 hat_kmap_load(addr, pp, attr, flags);
1603                 XPV_ALLOW_MIGRATE();
1604                 return;
1605         }
1606 
1607         /*
1608          * This is used for memory with normal caching enabled, so
1609          * always set HAT_STORECACHING_OK.
1610          */
1611         attr |= HAT_STORECACHING_OK;
1612         if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1613                 panic("unexpected hati_load_common() failure");
1614         XPV_ALLOW_MIGRATE();
1615 }
1616 
1617 /* ARGSUSED */
1618 void
1619 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1620     uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
1621 {
1622         hat_memload(hat, addr, pp, attr, flags);
1623 }
1624 
1625 /*
1626  * Load the given array of page structs using large pages when possible
1627  */
1628 void
1629 hat_memload_array(
1630         hat_t           *hat,
1631         caddr_t         addr,
1632         size_t          len,
1633         page_t          **pages,
1634         uint_t          attr,
1635         uint_t          flags)
1636 {
1637         uintptr_t       va = (uintptr_t)addr;
1638         uintptr_t       eaddr = va + len;
1639         level_t         level;
1640         size_t          pgsize;
1641         pgcnt_t         pgindx = 0;
1642         pfn_t           pfn;
1643         pgcnt_t         i;
1644 
1645         XPV_DISALLOW_MIGRATE();
1646         ASSERT(IS_PAGEALIGNED(va));
1647         ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1648         ASSERT(hat == kas.a_hat ||
1649             AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1650         ASSERT((flags & supported_memload_flags) == flags);
1651 
1652         /*
1653          * memload is used for memory with full caching enabled, so
1654          * set HAT_STORECACHING_OK.
1655          */
1656         attr |= HAT_STORECACHING_OK;
1657 
1658         /*
1659          * handle all pages using largest possible pagesize
1660          */
1661         while (va < eaddr) {
1662                 /*
1663                  * decide what level mapping to use (ie. pagesize)
1664                  */
1665                 pfn = page_pptonum(pages[pgindx]);
1666                 for (level = mmu.max_page_level; ; --level) {
1667                         pgsize = LEVEL_SIZE(level);
1668                         if (level == 0)
1669                                 break;
1670 
1671                         if (!IS_P2ALIGNED(va, pgsize) ||
1672                             (eaddr - va) < pgsize ||
1673                             !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
1674                                 continue;
1675 
1676                         /*
1677                          * To use a large mapping of this size, all the
1678                          * pages we are passed must be sequential subpages
1679                          * of the large page.
1680                          * hat_page_demote() can't change p_szc because
1681                          * all pages are locked.
1682                          */
1683                         if (pages[pgindx]->p_szc >= level) {
1684                                 for (i = 0; i < mmu_btop(pgsize); ++i) {
1685                                         if (pfn + i !=
1686                                             page_pptonum(pages[pgindx + i]))
1687                                                 break;
1688                                         ASSERT(pages[pgindx + i]->p_szc >=
1689                                             level);
1690                                         ASSERT(pages[pgindx] + i ==
1691                                             pages[pgindx + i]);
1692                                 }
1693                                 if (i == mmu_btop(pgsize)) {
1694 #ifdef DEBUG
1695                                         if (level == 2)
1696                                                 map1gcnt++;
1697 #endif
1698                                         break;
1699                                 }
1700                         }
1701                 }
1702 
1703                 /*
1704                  * Load this page mapping. If the load fails, try a smaller
1705                  * pagesize.
1706                  */
1707                 ASSERT(!IN_VA_HOLE(va));
1708                 while (hati_load_common(hat, va, pages[pgindx], attr,
1709                     flags, level, pfn) != 0) {
1710                         if (level == 0)
1711                                 panic("unexpected hati_load_common() failure");
1712                         --level;
1713                         pgsize = LEVEL_SIZE(level);
1714                 }
1715 
1716                 /*
1717                  * move to next page
1718                  */
1719                 va += pgsize;
1720                 pgindx += mmu_btop(pgsize);
1721         }
1722         XPV_ALLOW_MIGRATE();
1723 }
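
/*
 * Note on the loop above: the level chosen for each chunk requires va,
 * the physical address, and the remaining length all to be aligned to
 * LEVEL_SIZE(level), and every constituent page_t to belong to the same
 * large page (p_szc >= level) with sequential PFNs; otherwise it drops
 * down a level, with level 0 (one page at a time) always possible.
 */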
1724 
1725 /* ARGSUSED */
1726 void
1727 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1728     struct page **pps, uint_t attr, uint_t flags,
1729     hat_region_cookie_t rcookie)
1730 {
1731         hat_memload_array(hat, addr, len, pps, attr, flags);
1732 }
1733 
1734 /*
1735  * void hat_devload(hat, addr, len, pfn, attr, flags)
1736  *      load/lock the given page frame number
1737  *
1738  * Advisory ordering attributes. Apply only to device mappings.
1739  *
1740  * HAT_STRICTORDER: the CPU must issue the references in order, as the
1741  *      programmer specified.  This is the default.
1742  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1743  *      of reordering; store or load with store or load).
1744  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1745  *      to consecutive locations (for example, turn two consecutive byte
1746  *      stores into one halfword store), and it may batch individual loads
1747  *      (for example, turn two consecutive byte loads into one halfword load).
1748  *      This also implies re-ordering.
1749  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1750  *      until another store occurs.  The default is to fetch new data
1751  *      on every load.  This also implies merging.
1752  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1753  *      the device (perhaps with other data) at a later time.  The default is
1754  *      to push the data right away.  This also implies load caching.
1755  *
1756  * Equivalent of hat_memload(), but can be used for device memory where
1757  * there are no page_t's and we support additional flags (write merging, etc).
1758  * Note that we can have large page mappings with this interface.
1759  */
1760 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1761         HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1762         HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
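
/*
 * Illustrative sketch only: a driver mapping one page of device registers
 * at page frame "pfn" to kernel address "va" (both hypothetical) might use:
 *
 *	hat_devload(kas.a_hat, va, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 *
 * Since device registers are not memory, HAT_LOAD_NOCONSIST skips the
 * page_t mapping list and HAT_STRICTORDER (the default) avoids any
 * reordering, merging or caching of the accesses.
 */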
1763 
1764 void
1765 hat_devload(
1766         hat_t           *hat,
1767         caddr_t         addr,
1768         size_t          len,
1769         pfn_t           pfn,
1770         uint_t          attr,
1771         int             flags)
1772 {
1773         uintptr_t       va = ALIGN2PAGE(addr);
1774         uintptr_t       eva = va + len;
1775         level_t         level;
1776         size_t          pgsize;
1777         page_t          *pp;
1778         int             f;      /* per PTE copy of flags  - maybe modified */
1779         uint_t          a;      /* per PTE copy of attr */
1780 
1781         XPV_DISALLOW_MIGRATE();
1782         ASSERT(IS_PAGEALIGNED(va));
1783         ASSERT(hat == kas.a_hat || eva <= _userlimit);
1784         ASSERT(hat == kas.a_hat ||
1785             AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1786         ASSERT((flags & supported_devload_flags) == flags);
1787 
1788         /*
1789          * handle all pages
1790          */
1791         while (va < eva) {
1792 
1793                 /*
1794                  * decide what level mapping to use (ie. pagesize)
1795                  */
1796                 for (level = mmu.max_page_level; ; --level) {
1797                         pgsize = LEVEL_SIZE(level);
1798                         if (level == 0)
1799                                 break;
1800                         if (IS_P2ALIGNED(va, pgsize) &&
1801                             (eva - va) >= pgsize &&
1802                             IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
1803 #ifdef DEBUG
1804                                 if (level == 2)
1805                                         map1gcnt++;
1806 #endif
1807                                 break;
1808                         }
1809                 }
1810 
1811                 /*
1812                  * If this is just memory then allow caching (this happens
1813                  * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1814                  * to override that. If we don't have a page_t then make sure
1815                  * NOCONSIST is set.
1816                  */
1817                 a = attr;
1818                 f = flags;
1819                 if (!pf_is_memory(pfn))
1820                         f |= HAT_LOAD_NOCONSIST;
1821                 else if (!(a & HAT_PLAT_NOCACHE))
1822                         a |= HAT_STORECACHING_OK;
1823 
1824                 if (f & HAT_LOAD_NOCONSIST)
1825                         pp = NULL;
1826                 else
1827                         pp = page_numtopp_nolock(pfn);
1828 
1829                 /*
1830                  * Check to make sure we are really trying to map a valid
1831                  * memory page. A caller wishing to intentionally map free
1832                  * memory pages will have passed the HAT_LOAD_NOCONSIST
1833                  * flag, in which case pp will be NULL.
1834                  */
1835                 if (pp != NULL) {
1836                         if (PP_ISFREE(pp)) {
1837                                 panic("hat_devload: loading "
1838                                     "a mapping to free page %p", (void *)pp);
1839                         }
1840 
1841                         if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1842                                 panic("hat_devload: loading a mapping "
1843                                     "to an unlocked page %p",
1844                                     (void *)pp);
1845                         }
1846                 }
1847 
1848                 /*
1849                  * load this page mapping
1850                  */
1851                 ASSERT(!IN_VA_HOLE(va));
1852                 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
1853                         if (level == 0)
1854                                 panic("unexpected hati_load_common() failure");
1855                         --level;
1856                         pgsize = LEVEL_SIZE(level);
1857                 }
1858 
1859                 /*
1860                  * move to next page
1861                  */
1862                 va += pgsize;
1863                 pfn += mmu_btop(pgsize);
1864         }
1865         XPV_ALLOW_MIGRATE();
1866 }
1867 
1868 /*
1869  * void hat_unlock(hat, addr, len)
1870  *      unlock the mappings to a given range of addresses
1871  *
1872  * Locks are tracked by ht_lock_cnt in the htable.
1873  */
1874 void
1875 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1876 {
1877         uintptr_t       vaddr = (uintptr_t)addr;
1878         uintptr_t       eaddr = vaddr + len;
1879         htable_t        *ht = NULL;
1880 
1881         /*
1882          * Kernel entries are always locked; we don't track lock counts.
1883          */
1884         ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1885         ASSERT(IS_PAGEALIGNED(vaddr));
1886         ASSERT(IS_PAGEALIGNED(eaddr));
1887         if (hat == kas.a_hat)
1888                 return;
1889         if (eaddr > _userlimit)
1890                 panic("hat_unlock() address out of range - above _userlimit");
1891 
1892         XPV_DISALLOW_MIGRATE();
1893         ASSERT(AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
1894         while (vaddr < eaddr) {
1895                 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1896                 if (ht == NULL)
1897                         break;
1898 
1899                 ASSERT(!IN_VA_HOLE(vaddr));
1900 
1901                 if (ht->ht_lock_cnt < 1)
1902                         panic("hat_unlock(): lock_cnt < 1, "
1903                             "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
1904                 HTABLE_LOCK_DEC(ht);
1905 
1906                 vaddr += LEVEL_SIZE(ht->ht_level);
1907         }
1908         if (ht)
1909                 htable_release(ht);
1910         XPV_ALLOW_MIGRATE();
1911 }
1912 
1913 /* ARGSUSED */
1914 void
1915 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
1916     hat_region_cookie_t rcookie)
1917 {
1918         panic("No shared region support on x86");
1919 }
1920 
1921 #if !defined(__xpv)
1922 /*
1923  * Cross call service routine to demap a virtual page on
1924  * the current CPU, or to flush all mappings in the TLB.
1925  */
1926 /*ARGSUSED*/
1927 static int
1928 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1929 {
1930         hat_t   *hat = (hat_t *)a1;
1931         caddr_t addr = (caddr_t)a2;
1932         size_t len = (size_t)a3;
1933 
1934         /*
1935          * If the target hat isn't the kernel and this CPU isn't operating
1936          * in the target hat, we can ignore the cross call.
1937          */
1938         if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1939                 return (0);
1940 
1941         /*
1942          * For a normal address, we flush a range of contiguous mappings
1943          */
1944         if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1945                 for (size_t i = 0; i < len; i += MMU_PAGESIZE)
1946                         mmu_tlbflush_entry(addr + i);
1947                 return (0);
1948         }
1949 
1950         /*
1951          * Otherwise we reload cr3 to effect a complete TLB flush.
1952          *
1953          * A reload of cr3 for a VLP process also means we must recopy
1954          * the PTE values from the struct hat.
1955          */
1956         if (hat->hat_flags & HAT_VLP) {
1957 #if defined(__amd64)
1958                 x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1959 
1960                 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1961 #elif defined(__i386)
1962                 reload_pae32(hat, CPU);
1963 #endif
1964         }
1965         reload_cr3();
1966         return (0);
1967 }
1968 
1969 /*
1970  * Flush all TLB entries, including global (ie. kernel) ones.
1971  */
1972 static void
1973 flush_all_tlb_entries(void)
1974 {
1975         ulong_t cr4 = getcr4();
1976 
1977         if (cr4 & CR4_PGE) {
1978                 setcr4(cr4 & ~(ulong_t)CR4_PGE);
1979                 setcr4(cr4);
1980 
1981                 /*
1982                  * 32-bit PAE also always needs a reload_cr3().
1983                  */
1984                 if (mmu.max_level == 2)
1985                         reload_cr3();
1986         } else {
1987                 reload_cr3();
1988         }
1989 }
1990 
1991 #define TLB_CPU_HALTED  (01ul)
1992 #define TLB_INVAL_ALL   (02ul)
1993 #define CAS_TLB_INFO(cpu, old, new)     \
1994         atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
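
/*
 * Sketch of the delayed-flush handshake implemented below: a CPU going
 * idle marks itself TLB_CPU_HALTED in tlb_going_idle().  A CPU doing a
 * shootdown that finds a target CPU halted uses CAS_TLB_INFO() to set
 * TLB_INVAL_ALL instead of sending it a cross call, roughly:
 *
 *	(void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
 *	    TLB_CPU_HALTED | TLB_INVAL_ALL);
 *
 * When the halted CPU resumes, tlb_service() atomically clears its state
 * and, if TLB_INVAL_ALL was set, performs the deferred
 * flush_all_tlb_entries().
 */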
1995 
1996 /*
1997  * Record that a CPU is going idle
1998  */
1999 void
2000 tlb_going_idle(void)
2001 {
2002         atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
2003 }
2004 
2005 /*
2006  * Service a delayed TLB flush when coming out of being idle.
2007  * This is called from the CPU idle notification, with interrupts disabled.
2008  */
2009 void
2010 tlb_service(void)
2011 {
2012         ulong_t tlb_info;
2013         ulong_t found;
2014 
2015         /*
2016          * We only have to do something if coming out of being idle.
2017          */
2018         tlb_info = CPU->cpu_m.mcpu_tlb_info;
2019         if (tlb_info & TLB_CPU_HALTED) {
2020                 ASSERT(CPU->cpu_current_hat == kas.a_hat);
2021 
2022                 /*
2023                  * Atomic clear and fetch of old state.
2024                  */
2025                 while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
2026                         ASSERT(found & TLB_CPU_HALTED);
2027                         tlb_info = found;
2028                         SMT_PAUSE();
2029                 }
2030                 if (tlb_info & TLB_INVAL_ALL)
2031                         flush_all_tlb_entries();
2032         }
2033 }
2034 #endif /* !__xpv */
2035 
2036 /*
2037  * Internal routine to do cross calls to invalidate a range of pages on
2038  * all CPUs using a given hat.
2039  */
2040 void
2041 hat_tlb_inval_range(hat_t *hat, uintptr_t va, size_t len)
2042 {
2043         extern int      flushes_require_xcalls; /* from mp_startup.c */
2044         cpuset_t        justme;
2045         cpuset_t        cpus_to_shootdown;
2046 #ifndef __xpv
2047         cpuset_t        check_cpus;
2048         cpu_t           *cpup;
2049         int             c;
2050 #endif
2051 
2052         /*
2053          * If the hat is being destroyed, there are no more users, so
2054          * demap need not do anything.
2055          */
2056         if (hat->hat_flags & HAT_FREEING)
2057                 return;
2058 
2059         /*
2060          * If demapping from a shared pagetable, we must demap the entire
2061          * set of user TLB entries, since we don't know at what addresses
2062          * the pagetable was shared.
2063          */
2064         if (hat->hat_flags & HAT_SHARED) {
2065                 hat = kas.a_hat;
2066                 va = DEMAP_ALL_ADDR;
2067         }
2068 
2069         /*
2070          * if not running with multiple CPUs, don't use cross calls
2071          */
2072         if (panicstr || !flushes_require_xcalls) {
2073 #ifdef __xpv
2074                 if (va == DEMAP_ALL_ADDR) {
2075                         xen_flush_tlb();
2076                 } else {
2077                         for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2078                                 xen_flush_va((caddr_t)(va + i));
2079                 }
2080 #else
2081                 (void) hati_demap_func((xc_arg_t)hat,
2082                     (xc_arg_t)va, (xc_arg_t)len);
2083 #endif
2084                 return;
2085         }
2086 
2087 
2088         /*
2089          * Determine which CPUs to shoot down. Kernel changes always affect
2090          * all CPUs; otherwise it's just the CPUs currently executing in this hat.
2091          */
2092         kpreempt_disable();
2093         CPUSET_ONLY(justme, CPU->cpu_id);
2094         if (hat == kas.a_hat)
2095                 cpus_to_shootdown = khat_cpuset;
2096         else
2097                 cpus_to_shootdown = hat->hat_cpus;
2098 
2099 #ifndef __xpv
2100         /*
2101          * If any CPUs in the set are idle, just request a delayed flush
2102          * and avoid waking them up.
2103          */
2104         check_cpus = cpus_to_shootdown;
2105         for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
2106                 ulong_t tlb_info;
2107 
2108                 if (!CPU_IN_SET(check_cpus, c))
2109                         continue;
2110                 CPUSET_DEL(check_cpus, c);
2111                 cpup = cpu[c];
2112                 if (cpup == NULL)
2113                         continue;
2114 
2115                 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2116                 while (tlb_info == TLB_CPU_HALTED) {
2117                         (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
2118                             TLB_CPU_HALTED | TLB_INVAL_ALL);
2119                         SMT_PAUSE();
2120                         tlb_info = cpup->cpu_m.mcpu_tlb_info;
2121                 }
2122                 if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
2123                         HATSTAT_INC(hs_tlb_inval_delayed);
2124                         CPUSET_DEL(cpus_to_shootdown, c);
2125                 }
2126         }
2127 #endif
2128 
2129         if (CPUSET_ISNULL(cpus_to_shootdown) ||
2130             CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
2131 
2132 #ifdef __xpv
2133                 if (va == DEMAP_ALL_ADDR) {
2134                         xen_flush_tlb();
2135                 } else {
2136                         for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2137                                 xen_flush_va((caddr_t)(va + i));
2138                 }
2139 #else
2140                 (void) hati_demap_func((xc_arg_t)hat,
2141                     (xc_arg_t)va, (xc_arg_t)len);
2142 #endif
2143 
2144         } else {
2145 
2146                 CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
2147 #ifdef __xpv
2148                 if (va == DEMAP_ALL_ADDR) {
2149                         xen_gflush_tlb(cpus_to_shootdown);
2150                 } else {
2151                         for (size_t i = 0; i < len; i += MMU_PAGESIZE) {
2152                                 xen_gflush_va((caddr_t)(va + i),
2153                                     cpus_to_shootdown);
2154                         }
2155                 }
2156 #else
2157                 xc_call((xc_arg_t)hat, (xc_arg_t)va, (xc_arg_t)len,
2158                     CPUSET2BV(cpus_to_shootdown), hati_demap_func);
2159 #endif
2160 
2161         }
2162         kpreempt_enable();
2163 }
2164 
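/*
 * Invalidate the translation for a single page, on all CPUs that are
 * running in the given hat.
 */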
2165 void
2166 hat_tlb_inval(hat_t *hat, uintptr_t va)
2167 {
2168         hat_tlb_inval_range(hat, va, MMU_PAGESIZE);
2169 }
2170 
2171 /*
2172  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
2173  * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
2174  * handle releasing of the htables.
2175  */
2176 void
2177 hat_pte_unmap(
2178         htable_t        *ht,
2179         uint_t          entry,
2180         uint_t          flags,
2181         x86pte_t        old_pte,
2182         void            *pte_ptr,
2183         boolean_t       tlb)
2184 {
2185         hat_t           *hat = ht->ht_hat;
2186         hment_t         *hm = NULL;
2187         page_t          *pp = NULL;
2188         level_t         l = ht->ht_level;
2189         pfn_t           pfn;
2190 
2191         /*
2192          * We always track the locking counts, even if nothing is unmapped
2193          */
2194         if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2195                 ASSERT(ht->ht_lock_cnt > 0);
2196                 HTABLE_LOCK_DEC(ht);
2197         }
2198 
2199         /*
2200          * Figure out which page's mapping list lock to acquire, using the
2201          * PFN in the passed-in "old" PTE. We then attempt to invalidate
2202          * the PTE.  If another thread, probably a hat_pageunload(), has
2203          * asynchronously unmapped/remapped this address, we'll loop here.
2204          */
2205         ASSERT(ht->ht_busy > 0);
2206         while (PTE_ISVALID(old_pte)) {
2207                 pfn = PTE2PFN(old_pte, l);
2208                 if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
2209                         pp = NULL;
2210                 } else {
2211 #ifdef __xpv
2212                         if (pfn == PFN_INVALID)
2213                                 panic("Invalid PFN, but not PT_NOCONSIST");
2214 #endif
2215                         pp = page_numtopp_nolock(pfn);
2216                         if (pp == NULL) {
2217                                 panic("no page_t, not NOCONSIST: old_pte="
2218                                     FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2219                                     old_pte, (uintptr_t)ht, entry,
2220                                     (uintptr_t)pte_ptr);
2221                         }
2222                         x86_hm_enter(pp);
2223                 }
2224 
2225                 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr, tlb);
2226 
2227                 /*
2228                  * If the PTE didn't change, we've unmapped it and can proceed.
2229                  */
2230                 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2231                         break;
2232 
2233                 /*
2234                  * Otherwise, we'll have to retry with the current old_pte.
2235                  * Drop the hment lock, since the pfn may have changed.
2236                  */
2237                 if (pp != NULL) {
2238                         x86_hm_exit(pp);
2239                         pp = NULL;
2240                 } else {
2241                         ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2242                 }
2243         }
2244 
2245         /*
2246          * If the old mapping wasn't valid, there's nothing more to do
2247          */
2248         if (!PTE_ISVALID(old_pte)) {
2249                 if (pp != NULL)
2250                         x86_hm_exit(pp);
2251                 return;
2252         }
2253 
2254         /*
2255          * Take care of syncing any MOD/REF bits and removing the hment.
2256          */
2257         if (pp != NULL) {
2258                 if (!(flags & HAT_UNLOAD_NOSYNC))
2259                         hati_sync_pte_to_page(pp, old_pte, l);
2260                 hm = hment_remove(pp, ht, entry);
2261                 x86_hm_exit(pp);
2262                 if (hm != NULL)
2263                         hment_free(hm);
2264         }
2265 
2266         /*
2267          * Handle bookkeeping in the htable and hat.
2268          */
2269         ASSERT(ht->ht_valid_cnt > 0);
2270         HTABLE_DEC(ht->ht_valid_cnt);
2271         PGCNT_DEC(hat, l);
2272 }
2273 
2274 /*
2275  * Very cheap unload implementation to special-case some kernel addresses.
2276  */
2277 static void
2278 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2279 {
2280         uintptr_t       va = (uintptr_t)addr;
2281         uintptr_t       eva = va + len;
2282         pgcnt_t         pg_index;
2283         htable_t        *ht;
2284         uint_t          entry;
2285         x86pte_t        *pte_ptr;
2286         x86pte_t        old_pte;
2287 
2288         for (; va < eva; va += MMU_PAGESIZE) {
2289                 /*
2290                  * Get the PTE
2291                  */
2292                 pg_index = mmu_btop(va - mmu.kmap_addr);
2293                 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2294                 old_pte = GET_PTE(pte_ptr);
2295 
2296                 /*
2297                  * get the htable / entry
2298                  */
2299                 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2300                     >> LEVEL_SHIFT(1)];
2301                 entry = htable_va2entry(va, ht);
2302 
2303                 /*
2304                  * use mostly common code to unmap it.
2305                  */
2306                 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr, B_TRUE);
2307         }
2308 }
2309 
2310 
2311 /*
2312  * unload a range of virtual address space (no callback)
2313  */
2314 void
2315 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2316 {
2317         uintptr_t va = (uintptr_t)addr;
2318 
2319         XPV_DISALLOW_MIGRATE();
2320         ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2321 
2322         /*
2323          * special case for performance.
2324          */
2325         if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2326                 ASSERT(hat == kas.a_hat);
2327                 hat_kmap_unload(addr, len, flags);
2328         } else {
2329                 hat_unload_callback(hat, addr, len, flags, NULL);
2330         }
2331         XPV_ALLOW_MIGRATE();
2332 }
2333 
2334 /*
2335  * A contiguous range of pages being unloaded, used to batch callbacks.
2336  */
2337 typedef struct range_info {
2338         uintptr_t       rng_va;
2339         ulong_t         rng_cnt;
2340         level_t         rng_level;
2341 } range_info_t;
2342 
2343 /*
2344  * Invalidate the TLB, and perform the callback to the upper level VM system,
2345  * for the specified ranges of contiguous pages.
2346  */
2347 static void
2348 handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range)
2349 {
2350         while (cnt > 0) {
2351                 size_t len;
2352 
2353                 --cnt;
2354                 len = range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
2355                 hat_tlb_inval_range(hat, (uintptr_t)range[cnt].rng_va, len);
2356 
2357                 if (cb != NULL) {
2358                         cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2359                         cb->hcb_end_addr = cb->hcb_start_addr;
2360                         cb->hcb_end_addr += len;
2361                         cb->hcb_function(cb);
2362                 }
2363         }
2364 }
2365 
2366 /*
2367  * Unload a given range of addresses (has optional callback)
2368  *
2369  * Flags:
2370  * define       HAT_UNLOAD              0x00
2371  * define       HAT_UNLOAD_NOSYNC       0x02
2372  * define       HAT_UNLOAD_UNLOCK       0x04
2373  * define       HAT_UNLOAD_OTHER        0x08 - not used
2374  * define       HAT_UNLOAD_UNMAP        0x10 - same as HAT_UNLOAD
2375  */
2376 #define MAX_UNLOAD_CNT (8)
2377 void
2378 hat_unload_callback(
2379         hat_t           *hat,
2380         caddr_t         addr,
2381         size_t          len,
2382         uint_t          flags,
2383         hat_callback_t  *cb)
2384 {
2385         uintptr_t       vaddr = (uintptr_t)addr;
2386         uintptr_t       eaddr = vaddr + len;
2387         htable_t        *ht = NULL;
2388         uint_t          entry;
2389         uintptr_t       contig_va = (uintptr_t)-1L;
2390         range_info_t    r[MAX_UNLOAD_CNT];
2391         uint_t          r_cnt = 0;
2392         x86pte_t        old_pte;
2393 
2394         XPV_DISALLOW_MIGRATE();
2395         ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2396         ASSERT(IS_PAGEALIGNED(vaddr));
2397         ASSERT(IS_PAGEALIGNED(eaddr));
2398 
2399         /*
2400          * Special case a single page being unloaded for speed. This happens
2401          * quite frequently; COW faults after a fork() are one example.
2402          */
2403         if (cb == NULL && len == MMU_PAGESIZE) {
2404                 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2405                 if (ht != NULL) {
2406                         if (PTE_ISVALID(old_pte)) {
2407                                 hat_pte_unmap(ht, entry, flags, old_pte,
2408                                     NULL, B_TRUE);
2409                         }
2410                         htable_release(ht);
2411                 }
2412                 XPV_ALLOW_MIGRATE();
2413                 return;
2414         }
2415 
2416         while (vaddr < eaddr) {
2417                 old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2418                 if (ht == NULL)
2419                         break;
2420 
2421                 ASSERT(!IN_VA_HOLE(vaddr));
2422 
2423                 if (vaddr < (uintptr_t)addr)
2424                         panic("hat_unload_callback(): unmap inside large page");
2425 
2426                 /*
2427                  * We'll do the callbacks for contiguous ranges.
2428                  */
2429                 if (vaddr != contig_va ||
2430                     (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2431                         if (r_cnt == MAX_UNLOAD_CNT) {
2432                                 handle_ranges(hat, cb, r_cnt, r);
2433                                 r_cnt = 0;
2434                         }
2435                         r[r_cnt].rng_va = vaddr;
2436                         r[r_cnt].rng_cnt = 0;
2437                         r[r_cnt].rng_level = ht->ht_level;
2438                         ++r_cnt;
2439                 }
2440 
2441                 /*
2442                  * Unload one mapping (for a single page) from the page tables.
2443                  * Note that we do not remove the mapping from the TLB yet,
2444                  * as indicated by the tlb=FALSE argument to hat_pte_unmap().
2445                  * handle_ranges() will clear the TLB entries with one call to
2446                  * hat_tlb_inval_range() per contiguous range.  This is
2447                  * safe because the page cannot be reused until the
2448                  * callback is made (or we return).
2449                  */
2450                 entry = htable_va2entry(vaddr, ht);
2451                 hat_pte_unmap(ht, entry, flags, old_pte, NULL, B_FALSE);
2452                 ASSERT(ht->ht_level <= mmu.max_page_level);
2453                 vaddr += LEVEL_SIZE(ht->ht_level);
2454                 contig_va = vaddr;
2455                 ++r[r_cnt - 1].rng_cnt;
2456         }
2457         if (ht)
2458                 htable_release(ht);
2459 
2460         /*
2461          * handle last range for callbacks
2462          */
2463         if (r_cnt > 0)
2464                 handle_ranges(hat, cb, r_cnt, r);
2465         XPV_ALLOW_MIGRATE();
2466 }
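
/*
 * Illustrative sketch only: a caller unloading a locked range and wanting
 * a notification per contiguous range (my_unload_notify is hypothetical)
 * might use:
 *
 *	hat_callback_t cb;
 *
 *	cb.hcb_function = my_unload_notify;
 *	hat_unload_callback(hat, addr, len, HAT_UNLOAD_UNLOCK, &cb);
 *
 * handle_ranges() fills in hcb_start_addr/hcb_end_addr before invoking
 * cb.hcb_function for each range.
 */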
2467 
2468 /*
2469  * Invalidate a virtual address translation on a slave CPU during
2470  * panic() dumps.
2471  */
2472 void
2473 hat_flush_range(hat_t *hat, caddr_t va, size_t size)
2474 {
2475         ssize_t sz;
2476         caddr_t endva = va + size;
2477 
2478         while (va < endva) {
2479                 sz = hat_getpagesize(hat, va);
2480                 if (sz < 0) {
2481 #ifdef __xpv
2482                         xen_flush_tlb();
2483 #else
2484                         flush_all_tlb_entries();
2485 #endif
2486                         break;
2487                 }
2488 #ifdef __xpv
2489                 xen_flush_va(va);
2490 #else
2491                 mmu_tlbflush_entry(va);
2492 #endif
2493                 va += sz;
2494         }
2495 }
2496 
2497 /*
2498  * synchronize mapping with software data structures
2499  *
2500  * This interface is currently only used by the working set monitor
2501  * driver.
2502  */
2503 /*ARGSUSED*/
2504 void
2505 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2506 {
2507         uintptr_t       vaddr = (uintptr_t)addr;
2508         uintptr_t       eaddr = vaddr + len;
2509         htable_t        *ht = NULL;
2510         uint_t          entry;
2511         x86pte_t        pte;
2512         x86pte_t        save_pte;
2513         x86pte_t        new;
2514         page_t          *pp;
2515 
2516         ASSERT(!IN_VA_HOLE(vaddr));
2517         ASSERT(IS_PAGEALIGNED(vaddr));
2518         ASSERT(IS_PAGEALIGNED(eaddr));
2519         ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2520 
2521         XPV_DISALLOW_MIGRATE();
2522         for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2523 try_again:
2524                 pte = htable_walk(hat, &ht, &vaddr, eaddr);
2525                 if (ht == NULL)
2526                         break;
2527                 entry = htable_va2entry(vaddr, ht);
2528 
2529                 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2530                     PTE_GET(pte, PT_REF | PT_MOD) == 0)
2531                         continue;
2532 
2533                 /*
2534                  * We need to acquire the mapping list lock to protect
2535                  * against hat_pageunload(), hat_unload(), etc.
2536                  */
2537                 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2538                 if (pp == NULL)
2539                         break;
2540                 x86_hm_enter(pp);
2541                 save_pte = pte;
2542                 pte = x86pte_get(ht, entry);
2543                 if (pte != save_pte) {
2544                         x86_hm_exit(pp);
2545                         goto try_again;
2546                 }
2547                 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2548                     PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2549                         x86_hm_exit(pp);
2550                         continue;
2551                 }
2552 
2553                 /*
2554                  * Need to clear ref or mod bits. We may compete with
2555                  * hardware updating the R/M bits and have to try again.
2556                  */
2557                 if (flags == HAT_SYNC_ZERORM) {
2558                         new = pte;
2559                         PTE_CLR(new, PT_REF | PT_MOD);
2560                         pte = hati_update_pte(ht, entry, pte, new);
2561                         if (pte != 0) {
2562                                 x86_hm_exit(pp);
2563                                 goto try_again;
2564                         }
2565                 } else {
2566                         /*
2567                          * sync the PTE to the page_t
2568                          */
2569                         hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2570                 }
2571                 x86_hm_exit(pp);
2572         }
2573         if (ht)
2574                 htable_release(ht);
2575         XPV_ALLOW_MIGRATE();
2576 }
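
/*
 * Illustrative sketch only: a working set monitor clearing the ref/mod
 * bits for one page of address space, so that later references to it can
 * be detected:
 *
 *	hat_sync(hat, addr, MMU_PAGESIZE, HAT_SYNC_ZERORM);
 */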
2577 
2578 /*
2579  * void hat_map(hat, addr, len, flags)
2580  */
2581 /*ARGSUSED*/
2582 void
2583 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2584 {
2585         /* does nothing */
2586 }
2587 
2588 /*
2589  * uint_t hat_getattr(hat, addr, *attr)
2590  *      returns attr for <hat,addr> in *attr.  returns 0 if there was a
2591  *      mapping and *attr is valid, nonzero if there was no mapping and
2592  *      *attr is not valid.
2593  */
2594 uint_t
2595 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2596 {
2597         uintptr_t       vaddr = ALIGN2PAGE(addr);
2598         htable_t        *ht = NULL;
2599         x86pte_t        pte;
2600 
2601         ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2602 
2603         if (IN_VA_HOLE(vaddr))
2604                 return ((uint_t)-1);
2605 
2606         ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2607         if (ht == NULL)
2608                 return ((uint_t)-1);
2609 
2610         if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2611                 htable_release(ht);
2612                 return ((uint_t)-1);
2613         }
2614 
2615         *attr = PROT_READ;
2616         if (PTE_GET(pte, PT_WRITABLE))
2617                 *attr |= PROT_WRITE;
2618         if (PTE_GET(pte, PT_USER))
2619                 *attr |= PROT_USER;
2620         if (!PTE_GET(pte, mmu.pt_nx))
2621                 *attr |= PROT_EXEC;
2622         if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
2623                 *attr |= HAT_NOSYNC;
2624         htable_release(ht);
2625         return (0);
2626 }
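
/*
 * Illustrative sketch only: testing whether <hat, addr> is currently
 * mapped writable:
 *
 *	uint_t attr;
 *
 *	if (hat_getattr(hat, addr, &attr) == 0 && (attr & PROT_WRITE)) {
 *		... the mapping exists and is writable ...
 *	}
 */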
2627 
2628 /*
2629  * hat_updateattr() applies the given attribute change to an existing mapping
2630  */
2631 #define HAT_LOAD_ATTR           1
2632 #define HAT_SET_ATTR            2
2633 #define HAT_CLR_ATTR            3
2634 
2635 static void
2636 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2637 {
2638         uintptr_t       vaddr = (uintptr_t)addr;
2639         uintptr_t       eaddr = (uintptr_t)addr + len;
2640         htable_t        *ht = NULL;
2641         uint_t          entry;
2642         x86pte_t        oldpte, newpte;
2643         page_t          *pp;
2644 
2645         XPV_DISALLOW_MIGRATE();
2646         ASSERT(IS_PAGEALIGNED(vaddr));
2647         ASSERT(IS_PAGEALIGNED(eaddr));
2648         ASSERT(hat == kas.a_hat ||
2649             AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2650         for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2651 try_again:
2652                 oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2653                 if (ht == NULL)
2654                         break;
2655                 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2656                         continue;
2657 
2658                 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2659                 if (pp == NULL)
2660                         continue;
2661                 x86_hm_enter(pp);
2662 
2663                 newpte = oldpte;
2664                 /*
2665                  * We found a page table entry in the desired range,
2666                  * figure out the new attributes.
2667                  */
2668                 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2669                         if ((attr & PROT_WRITE) &&
2670                             !PTE_GET(oldpte, PT_WRITABLE))
2671                                 newpte |= PT_WRITABLE;
2672 
2673                         if ((attr & HAT_NOSYNC) &&
2674                             PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
2675                                 newpte |= PT_NOSYNC;
2676 
2677                         if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2678                                 newpte &= ~mmu.pt_nx;
2679                 }
2680 
2681                 if (what == HAT_LOAD_ATTR) {
2682                         if (!(attr & PROT_WRITE) &&
2683                             PTE_GET(oldpte, PT_WRITABLE))
2684                                 newpte &= ~PT_WRITABLE;
2685 
2686                         if (!(attr & HAT_NOSYNC) &&
2687                             PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2688                                 newpte &= ~PT_SOFTWARE;
2689 
2690                         if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2691                                 newpte |= mmu.pt_nx;
2692                 }
2693 
2694                 if (what == HAT_CLR_ATTR) {
2695                         if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2696                                 newpte &= ~PT_WRITABLE;
2697 
2698                         if ((attr & HAT_NOSYNC) &&
2699                             PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2700                                 newpte &= ~PT_SOFTWARE;
2701 
2702                         if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2703                                 newpte |= mmu.pt_nx;
2704                 }
2705 
2706                 /*
2707                  * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
2708                  * x86pte_set() depends on this.
2709                  */
2710                 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
2711                         newpte |= PT_REF | PT_MOD;
2712 
2713                 /*
2714                  * what about PROT_READ or others? this code only handles:
2715                  * EXEC, WRITE, NOSYNC
2716                  */
2717 
2718                 /*
2719                  * If new PTE really changed, update the table.
2720                  */
2721                 if (newpte != oldpte) {
2722                         entry = htable_va2entry(vaddr, ht);
2723                         oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2724                         if (oldpte != 0) {
2725                                 x86_hm_exit(pp);
2726                                 goto try_again;
2727                         }
2728                 }
2729                 x86_hm_exit(pp);
2730         }
2731         if (ht)
2732                 htable_release(ht);
2733         XPV_ALLOW_MIGRATE();
2734 }
2735 
2736 /*
2737  * Various wrappers for hat_updateattr()
2738  */
2739 void
2740 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2741 {
2742         ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2743         hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2744 }
2745 
2746 void
2747 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2748 {
2749         ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2750         hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2751 }
2752 
2753 void
2754 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2755 {
2756         ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2757         hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2758 }
2759 
2760 void
2761 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2762 {
2763         ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2764         hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2765 }
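
/*
 * For example (sketch only), write protecting an existing range:
 *
 *	hat_clrattr(hat, addr, len, PROT_WRITE);
 *
 * clears PT_WRITABLE from every valid mapping in [addr, addr + len);
 * addresses with no current mapping are unaffected, since
 * hat_updateattr() only visits PTEs found by htable_walk().
 */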
2766 
2767 /*
2768  * size_t hat_getpagesize(hat, addr)
2769  *      returns pagesize in bytes for <hat, addr>. returns -1 if there is
2770  *      no mapping. This is an advisory call.
2771  */
2772 ssize_t
2773 hat_getpagesize(hat_t *hat, caddr_t addr)
2774 {
2775         uintptr_t       vaddr = ALIGN2PAGE(addr);
2776         htable_t        *ht;
2777         size_t          pagesize;
2778 
2779         ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2780         if (IN_VA_HOLE(vaddr))
2781                 return (-1);
2782         ht = htable_getpage(hat, vaddr, NULL);
2783         if (ht == NULL)
2784                 return (-1);
2785         pagesize = LEVEL_SIZE(ht->ht_level);
2786         htable_release(ht);
2787         return (pagesize);
2788 }
2789 
2790 
2791 
2792 /*
2793  * pfn_t hat_getpfnum(hat, addr)
2794  *      returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2795  */
2796 pfn_t
2797 hat_getpfnum(hat_t *hat, caddr_t addr)
2798 {
2799         uintptr_t       vaddr = ALIGN2PAGE(addr);
2800         htable_t        *ht;
2801         uint_t          entry;
2802         pfn_t           pfn = PFN_INVALID;
2803 
2804         ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2805         if (khat_running == 0)
2806                 return (PFN_INVALID);
2807 
2808         if (IN_VA_HOLE(vaddr))
2809                 return (PFN_INVALID);
2810 
2811         XPV_DISALLOW_MIGRATE();
2812         /*
2813          * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2814          * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2815          * this up.
2816          */
2817         if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2818                 x86pte_t pte;
2819                 pgcnt_t pg_index;
2820 
2821                 pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2822                 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2823                 if (PTE_ISVALID(pte))
2824                         /*LINTED [use of constant 0 causes a lint warning] */
2825                         pfn = PTE2PFN(pte, 0);
2826                 XPV_ALLOW_MIGRATE();
2827                 return (pfn);
2828         }
2829 
2830         ht = htable_getpage(hat, vaddr, &entry);
2831         if (ht == NULL) {
2832                 XPV_ALLOW_MIGRATE();
2833                 return (PFN_INVALID);
2834         }
2835         ASSERT(vaddr >= ht->ht_vaddr);
2836         ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2837         pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2838         if (ht->ht_level > 0)
2839                 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2840         htable_release(ht);
2841         XPV_ALLOW_MIGRATE();
2842         return (pfn);
2843 }
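
/*
 * Illustrative sketch only: converting a mapped kernel virtual address
 * to a physical address (hat_getpfnum() returns PFN_INVALID if there is
 * no mapping, which a real caller must check):
 *
 *	paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, va)) +
 *	    ((uintptr_t)va & MMU_PAGEOFFSET);
 */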
2844 
2845 /*
2846  * int hat_probe(hat, addr)
2847  *      return 0 if no valid mapping is present.  Faster version
2848  *      of hat_getattr() on certain architectures.
2849  */
2850 int
2851 hat_probe(hat_t *hat, caddr_t addr)
2852 {
2853         uintptr_t       vaddr = ALIGN2PAGE(addr);
2854         uint_t          entry;
2855         htable_t        *ht;
2856         pgcnt_t         pg_off;
2857 
2858         ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2859         ASSERT(hat == kas.a_hat ||
2860             AS_LOCK_HELD(hat->hat_as, &hat->hat_as->a_lock));
2861         if (IN_VA_HOLE(vaddr))
2862                 return (0);
2863 
2864         /*
2865          * The most common use of hat_probe() is from segmap. We special
2866          * case it for performance.
2867          */
2868         if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2869                 pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2870                 if (mmu.pae_hat)
2871                         return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2872                 else
2873                         return (PTE_ISVALID(
2874                             ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2875         }
2876 
2877         ht = htable_getpage(hat, vaddr, &entry);
2878         htable_release(ht);
2879         return (ht != NULL);
2880 }
2881 
2882 /*
2883  * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
2884  */
2885 static int
2886 is_it_dism(hat_t *hat, caddr_t va)
2887 {
2888         struct seg *seg;
2889         struct shm_data *shmd;
2890         struct spt_data *sptd;
2891 
2892         seg = as_findseg(hat->hat_as, va, 0);
2893         ASSERT(seg != NULL);
2894         ASSERT(seg->s_base <= va);
2895         shmd = (struct shm_data *)seg->s_data;
2896         ASSERT(shmd != NULL);
2897         sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2898         ASSERT(sptd != NULL);
2899         if (sptd->spt_flags & SHM_PAGEABLE)
2900                 return (1);
2901         return (0);
2902 }
2903 
2904 /*
2905  * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
2906  * except that we use the ism_hat's existing mappings to determine the pages
2907  * and protections to use for this hat. If we find a full properly aligned
2908  * and sized pagetable, we will attempt to share the pagetable itself.
2909  */
2910 /*ARGSUSED*/
2911 int
2912 hat_share(
2913         hat_t           *hat,
2914         caddr_t         addr,
2915         hat_t           *ism_hat,
2916         caddr_t         src_addr,
2917         size_t          len,    /* almost useless value, see below */
2918         uint_t          ismszc)
2919 {
2920         uintptr_t       vaddr_start = (uintptr_t)addr;
2921         uintptr_t       vaddr;
2922         uintptr_t       eaddr = vaddr_start + len;
2923         uintptr_t       ism_addr_start = (uintptr_t)src_addr;
2924         uintptr_t       ism_addr = ism_addr_start;
2925         uintptr_t       e_ism_addr = ism_addr + len;
2926         htable_t        *ism_ht = NULL;
2927         htable_t        *ht;
2928         x86pte_t        pte;
2929         page_t          *pp;
2930         pfn_t           pfn;
2931         level_t         l;
2932         pgcnt_t         pgcnt;
2933         uint_t          prot;
2934         int             is_dism;
2935         int             flags;
2936 
2937         /*
2938          * We might be asked to share an empty DISM hat by as_dup()
2939          */
2940         ASSERT(hat != kas.a_hat);
2941         ASSERT(eaddr <= _userlimit);
2942         if (!(ism_hat->hat_flags & HAT_SHARED)) {
2943                 ASSERT(hat_get_mapped_size(ism_hat) == 0);
2944                 return (0);
2945         }
2946         XPV_DISALLOW_MIGRATE();
2947 
2948         /*
2949          * The SPT segment driver often passes us a size larger than there are
2950          * valid mappings. That's because it rounds the segment size up to a
2951          * large pagesize, even if the actual memory mapped by ism_hat is less.
2952          */
2953         ASSERT(IS_PAGEALIGNED(vaddr_start));
2954         ASSERT(IS_PAGEALIGNED(ism_addr_start));
2955         ASSERT(ism_hat->hat_flags & HAT_SHARED);
2956         is_dism = is_it_dism(hat, addr);
2957         while (ism_addr < e_ism_addr) {
2958                 /*
2959                  * use htable_walk to get the next valid ISM mapping
2960                  */
2961                 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2962                 if (ism_ht == NULL)
2963                         break;
2964 
2965                 /*
2966                  * First check to see if we already share the page table.
2967                  */
2968                 l = ism_ht->ht_level;
2969                 vaddr = vaddr_start + (ism_addr - ism_addr_start);
2970                 ht = htable_lookup(hat, vaddr, l);
2971                 if (ht != NULL) {
2972                         if (ht->ht_flags & HTABLE_SHARED_PFN)
2973                                 goto shared;
2974                         htable_release(ht);
2975                         goto not_shared;
2976                 }
2977 
2978                 /*
2979                  * We can never share the top level table.
2980                  */
2981                 if (l == mmu.max_level)
2982                         goto not_shared;
2983 
2984                 /*
2985                  * Avoid level mismatches later due to DISM faults.
2986                  */
2987                 if (is_dism && l > 0)
2988                         goto not_shared;
2989 
2990                 /*
2991                  * addresses and lengths must align
2992                  * table must be fully populated
2993                  * no lower level page tables
2994                  */
2995                 if (ism_addr != ism_ht->ht_vaddr ||
2996                     (vaddr & LEVEL_OFFSET(l + 1)) != 0)
2997                         goto not_shared;
2998 
2999                 /*
3000                  * The range of address space must cover a full table.
3001                  */
3002                 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
3003                         goto not_shared;
3004 
3005                 /*
3006                  * All entries in the ISM page table must be leaf PTEs.
3007                  */
3008                 if (l > 0) {
3009                         int e;
3010 
3011                         /*
3012                          * We know the 0th is from htable_walk() above.
3013                          */
3014                         for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
3015                                 x86pte_t pte;
3016                                 pte = x86pte_get(ism_ht, e);
3017                                 if (!PTE_ISPAGE(pte, l))
3018                                         goto not_shared;
3019                         }
3020                 }
3021 
3022                 /*
3023                  * share the page table
3024                  */
3025                 ht = htable_create(hat, vaddr, l, ism_ht);
3026 shared:
3027                 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
3028                 ASSERT(ht->ht_shares == ism_ht);
3029                 hat->hat_ism_pgcnt +=
3030                     (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
3031                     (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3032                 ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
3033                 htable_release(ht);
3034                 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
3035                 htable_release(ism_ht);
3036                 ism_ht = NULL;
3037                 continue;
3038 
3039 not_shared:
3040                 /*
3041                  * Unable to share the page table. Instead we will
3042                  * create new mappings from the values in the ISM mappings.
3043                  * Figure out what level size mappings to use;
3044                  * Figure out what level size mappings to use.
3045                 for (l = ism_ht->ht_level; l > 0; --l) {
3046                         if (LEVEL_SIZE(l) <= eaddr - vaddr &&
3047                             (vaddr & LEVEL_OFFSET(l)) == 0)
3048                                 break;
3049                 }
3050 
3051                 /*
3052                  * The ISM mapping might be larger than the share area;
3053                  * be careful to truncate it if needed.
3054                  */
3055                 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
3056                         pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
3057                 } else {
3058                         pgcnt = mmu_btop(eaddr - vaddr);
3059                         l = 0;
3060                 }
3061 
3062                 pfn = PTE2PFN(pte, ism_ht->ht_level);
3063                 ASSERT(pfn != PFN_INVALID);
3064                 while (pgcnt > 0) {
3065                         /*
3066                          * Make a new pte for the PFN for this level.
3067                          * Copy protections for the pte from the ISM pte.
3068                          */
3069                         pp = page_numtopp_nolock(pfn);
3070                         ASSERT(pp != NULL);
3071 
3072                         prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
3073                         if (PTE_GET(pte, PT_WRITABLE))
3074                                 prot |= PROT_WRITE;
3075                         if (!PTE_GET(pte, PT_NX))
3076                                 prot |= PROT_EXEC;
3077 
3078                         flags = HAT_LOAD;
3079                         if (!is_dism)
3080                                 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
3081                         while (hati_load_common(hat, vaddr, pp, prot, flags,
3082                             l, pfn) != 0) {
3083                                 if (l == 0)
3084                                         panic("hati_load_common() failure");
3085                                 --l;
3086                         }
3087 
3088                         vaddr += LEVEL_SIZE(l);
3089                         ism_addr += LEVEL_SIZE(l);
3090                         pfn += mmu_btop(LEVEL_SIZE(l));
3091                         pgcnt -= mmu_btop(LEVEL_SIZE(l));
3092                 }
3093         }
3094         if (ism_ht != NULL)
3095                 htable_release(ism_ht);
3096         XPV_ALLOW_MIGRATE();
3097         return (0);
3098 }
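
/*
 * Illustrative sketch (not part of the driver): the "can this ISM pagetable
 * be shared?" tests in hat_share() reduce to an alignment and coverage
 * check.  A level l pagetable maps LEVEL_SIZE(l + 1) bytes of VA, so the
 * target address must sit on that boundary and the remaining ISM range must
 * span the entire table.  The helper below is a hypothetical restatement of
 * those two conditions.
 */
static int
hati_can_share_table(uintptr_t vaddr, uintptr_t ism_addr, uintptr_t e_ism_addr,
    level_t l)
{
	/* the target VA must be aligned on the span of a level l table */
	if ((vaddr & LEVEL_OFFSET(l + 1)) != 0)
		return (0);

	/* the remaining ISM range must cover the whole table */
	if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
		return (0);

	return (1);
}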
3099 
3100 
3101 /*
3102  * hat_unshare() is similar to hat_unload_callback(), but
3103  * we have to look for empty shared pagetables. Note that
3104  * hat_unshare() is always invoked against an entire segment.
3105  */
3106 /*ARGSUSED*/
3107 void
3108 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3109 {
3110         uint64_t        vaddr = (uintptr_t)addr;
3111         uintptr_t       eaddr = vaddr + len;
3112         htable_t        *ht = NULL;
3113         uint_t          need_demaps = 0;
3114         int             flags = HAT_UNLOAD_UNMAP;
3115         level_t         l;
3116 
3117         ASSERT(hat != kas.a_hat);
3118         ASSERT(eaddr <= _userlimit);
3119         ASSERT(IS_PAGEALIGNED(vaddr));
3120         ASSERT(IS_PAGEALIGNED(eaddr));
3121         XPV_DISALLOW_MIGRATE();
3122 
3123         /*
3124          * First go through and remove any shared pagetables.
3125          *
3126          * Note that it's ok to delay the TLB shootdown till the entire range is
3127          * finished, because if hat_pageunload() were to unload a shared
3128          * pagetable page, its hat_tlb_inval() would do a global TLB invalidate.
3129          */
3130         l = mmu.max_page_level;
3131         if (l == mmu.max_level)
3132                 --l;
3133         for (; l >= 0; --l) {
3134                 for (vaddr = (uintptr_t)addr; vaddr < eaddr;
3135                     vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
3136                         ASSERT(!IN_VA_HOLE(vaddr));
3137                         /*
3138                          * find a pagetable that maps the current address
3139                          */
3140                         ht = htable_lookup(hat, vaddr, l);
3141                         if (ht == NULL)
3142                                 continue;
3143                         if (ht->ht_flags & HTABLE_SHARED_PFN) {
3144                                 /*
3145                                  * clear page count, set valid_cnt to 0,
3146                                  * let htable_release() finish the job
3147                                  */
3148                                 hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3149                                     (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3150                                 ht->ht_valid_cnt = 0;
3151                                 need_demaps = 1;
3152                         }
3153                         htable_release(ht);
3154                 }
3155         }
3156 
3157         /*
3158          * flush the TLBs - since we're probably dealing with MANY mappings
3159          * we do just one CR3 reload.
3160          */
3161         if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3162                 hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3163 
3164         /*
3165          * Now go back and clean up any unaligned mappings that
3166          * couldn't share pagetables.
3167          */
3168         if (!is_it_dism(hat, addr))
3169                 flags |= HAT_UNLOAD_UNLOCK;
3170         hat_unload(hat, addr, len, flags);
3171         XPV_ALLOW_MIGRATE();
3172 }
3173 
3174 
3175 /*
3176  * hat_reserve() does nothing
3177  */
3178 /*ARGSUSED*/
3179 void
3180 hat_reserve(struct as *as, caddr_t addr, size_t len)
3181 {
3182 }
3183 
3184 
3185 /*
3186  * Called when all mappings to a page should have write permission removed.
3187  * Mostly stolen from hat_pagesync()
3188  */
3189 static void
3190 hati_page_clrwrt(struct page *pp)
3191 {
3192         hment_t         *hm = NULL;
3193         htable_t        *ht;
3194         uint_t          entry;
3195         x86pte_t        old;
3196         x86pte_t        new;
3197         uint_t          pszc = 0;
3198 
3199         XPV_DISALLOW_MIGRATE();
3200 next_size:
3201         /*
3202          * walk thru the mapping list clearing write permission
3203          */
3204         x86_hm_enter(pp);
3205         while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3206                 if (ht->ht_level < pszc)
3207                         continue;
3208                 old = x86pte_get(ht, entry);
3209 
3210                 for (;;) {
3211                         /*
3212                          * Is this mapping of interest?
3213                          */
3214                         if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3215                             PTE_GET(old, PT_WRITABLE) == 0)
3216                                 break;
3217 
3218                         /*
3219                          * Clear ref/mod writable bits. This requires cross
3220                          * calls to ensure any executing TLBs see cleared bits.
3221                          */
3222                         new = old;
3223                         PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3224                         old = hati_update_pte(ht, entry, old, new);
3225                         if (old != 0)
3226                                 continue;
3227 
3228                         break;
3229                 }
3230         }
3231         x86_hm_exit(pp);
3232         while (pszc < pp->p_szc) {
3233                 page_t *tpp;
3234                 pszc++;
3235                 tpp = PP_GROUPLEADER(pp, pszc);
3236                 if (pp != tpp) {
3237                         pp = tpp;
3238                         goto next_size;
3239                 }
3240         }
3241         XPV_ALLOW_MIGRATE();
3242 }
3243 
3244 /*
3245  * void hat_page_setattr(pp, flag)
3246  * void hat_page_clrattr(pp, flag)
3247  *      used to set/clr ref/mod bits.
3248  */
3249 void
3250 hat_page_setattr(struct page *pp, uint_t flag)
3251 {
3252         vnode_t         *vp = pp->p_vnode;
3253         kmutex_t        *vphm = NULL;
3254         page_t          **listp;
3255         int             noshuffle;
3256 
3257         noshuffle = flag & P_NSH;
3258         flag &= ~P_NSH;
3259 
3260         if (PP_GETRM(pp, flag) == flag)
3261                 return;
3262 
3263         if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3264             !noshuffle) {
3265                 vphm = page_vnode_mutex(vp);
3266                 mutex_enter(vphm);
3267         }
3268 
3269         PP_SETRM(pp, flag);
3270 
3271         if (vphm != NULL) {
3272 
3273                 /*
3274                  * Some file systems examine v_pages for NULL without
3275                  * grabbing the vphm mutex. We must not let v_pages become
3276                  * NULL when pp is the only page on the list.
3277                  */
3278                 if (pp->p_vpnext != pp) {
3279                         page_vpsub(&vp->v_pages, pp);
3280                         if (vp->v_pages != NULL)
3281                                 listp = &vp->v_pages->p_vpprev->p_vpnext;
3282                         else
3283                                 listp = &vp->v_pages;
3284                         page_vpadd(listp, pp);
3285                 }
3286                 mutex_exit(vphm);
3287         }
3288 }
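
/*
 * Hypothetical sketch (not part of the driver, and assuming the caller
 * holds page_vnode_mutex(vp)): the reshuffling above keeps freshly dirtied
 * pages at the tail of v_pages for VMODSORT vnodes, while pvn_write_done()
 * moves cleaned pages back to the head.  A dirty-page scan can therefore
 * walk backwards from the tail and stop at the first clean page.
 */
static pgcnt_t
vmodsort_count_dirty(vnode_t *vp)
{
	pgcnt_t cnt = 0;
	page_t *pp;

	ASSERT(IS_VMODSORT(vp));

	if (vp->v_pages == NULL)
		return (0);

	for (pp = vp->v_pages->p_vpprev; PP_ISMOD(pp); pp = pp->p_vpprev) {
		++cnt;
		if (pp == vp->v_pages)
			break;	/* wrapped around to the head */
	}
	return (cnt);
}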
3289 
3290 void
3291 hat_page_clrattr(struct page *pp, uint_t flag)
3292 {
3293         vnode_t         *vp = pp->p_vnode;
3294         ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3295 
3296         /*
3297          * Caller is expected to hold page's io lock for VMODSORT to work
3298          * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
3299          * bit is cleared.
3300          * We don't have an assert here, to avoid tripping some existing
3301          * third party code. The dirty page is moved back to the top of
3302          * the v_pages list after I/O is done in pvn_write_done().
3303          */
3304         PP_CLRRM(pp, flag);
3305 
3306         if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3307 
3308                 /*
3309                  * VMODSORT works by removing write permissions and getting
3310                  * a fault when a page is made dirty. At this point
3311                  * we need to remove write permission from all mappings
3312                  * to this page.
3313                  */
3314                 hati_page_clrwrt(pp);
3315         }
3316 }
3317 
3318 /*
3319  *      If flag is specified, returns 0 if attribute is disabled
3320  *      and nonzero if enabled.  If flag specifies multiple attributes
3321  *      then returns 0 if ALL attributes are disabled.  This is an advisory
3322  *      call.
3323  */
3324 uint_t
3325 hat_page_getattr(struct page *pp, uint_t flag)
3326 {
3327         return (PP_GETRM(pp, flag));
3328 }
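
/*
 * For example, an advisory "was this page touched?" test can pass multiple
 * flags at once and rely on the any-bit-set semantics described above
 * (hypothetical helper, for illustration only):
 */
static int
page_was_touched(page_t *pp)
{
	return (hat_page_getattr(pp, P_REF | P_MOD) != 0);
}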
3329 
3330 
3331 /*
3332  * common code used by hat_pageunload() and hment_steal()
3333  */
3334 hment_t *
3335 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3336 {
3337         x86pte_t old_pte;
3338         pfn_t pfn = pp->p_pagenum;
3339         hment_t *hm;
3340 
3341         /*
3342          * We need to acquire a hold on the htable in order to
3343          * do the invalidate. We know the htable must exist, since
3344          * unmaps don't release the htable until after removing any
3345          * hment. Having x86_hm_enter() keeps that from proceeding.
3346          */
3347         htable_acquire(ht);
3348 
3349         /*
3350          * Invalidate the PTE and remove the hment.
3351          */
3352         old_pte = x86pte_inval(ht, entry, 0, NULL, B_TRUE);
3353         if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3354                 panic("x86pte_inval() failure found PTE = " FMT_PTE
3355                     " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3356                     old_pte, pfn, (uintptr_t)ht, entry);
3357         }
3358 
3359         /*
3360          * Clean up all the htable information for this mapping
3361          */
3362         ASSERT(ht->ht_valid_cnt > 0);
3363         HTABLE_DEC(ht->ht_valid_cnt);
3364         PGCNT_DEC(ht->ht_hat, ht->ht_level);
3365 
3366         /*
3367          * sync ref/mod bits to the page_t
3368          */
3369         if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3370                 hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3371 
3372         /*
3373          * Remove the mapping list entry for this page.
3374          */
3375         hm = hment_remove(pp, ht, entry);
3376 
3377         /*
3378          * drop the mapping list lock so that we might free the
3379          * hment and htable.
3380          */
3381         x86_hm_exit(pp);
3382         htable_release(ht);
3383         return (hm);
3384 }
3385 
3386 extern int      vpm_enable;
3387 /*
3388  * Unload all translations to a page. If the page is a subpage of a large
3389  * page, the large page mappings are also removed.
3390  *
3391  * The forceflags are unused.
3392  */
3393 
3394 /*ARGSUSED*/
3395 static int
3396 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3397 {
3398         page_t          *cur_pp = pp;
3399         hment_t         *hm;
3400         hment_t         *prev;
3401         htable_t        *ht;
3402         uint_t          entry;
3403         level_t         level;
3404 
3405         XPV_DISALLOW_MIGRATE();
3406 
3407         /*
3408          * prevent recursion due to kmem_free()
3409          */
3410         ++curthread->t_hatdepth;
3411         ASSERT(curthread->t_hatdepth < 16);
3412 
3413 #if defined(__amd64)
3414         /*
3415          * clear the vpm ref.
3416          */
3417         if (vpm_enable) {
3418                 pp->p_vpmref = 0;
3419         }
3420 #endif
3421         /*
3422          * The loop with next_size handles pages with multiple pagesize mappings
3423          */
3424 next_size:
3425         for (;;) {
3426 
3427                 /*
3428                  * Get a mapping list entry
3429                  */
3430                 x86_hm_enter(cur_pp);
3431                 for (prev = NULL; ; prev = hm) {
3432                         hm = hment_walk(cur_pp, &ht, &entry, prev);
3433                         if (hm == NULL) {
3434                                 x86_hm_exit(cur_pp);
3435 
3436                                 /*
3437                                  * If not part of a larger page, we're done.
3438                                  */
3439                                 if (cur_pp->p_szc <= pg_szcd) {
3440                                         ASSERT(curthread->t_hatdepth > 0);
3441                                         --curthread->t_hatdepth;
3442                                         XPV_ALLOW_MIGRATE();
3443                                         return (0);
3444                                 }
3445 
3446                                 /*
3447                                  * Else check the next larger page size.
3448                                  * hat_page_demote() may decrease p_szc
3449                                  * but that's ok; we'll just take an extra
3450                                  * trip, discover there are no larger
3451                                  * mappings, and return.
3452                                  */
3453                                 ++pg_szcd;
3454                                 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3455                                 goto next_size;
3456                         }
3457 
3458                         /*
3459                          * If this mapping size matches, remove it.
3460                          */
3461                         level = ht->ht_level;
3462                         if (level == pg_szcd)
3463                                 break;
3464                 }
3465 
3466                 /*
3467                  * Remove the mapping list entry for this page.
3468                  * Note this does the x86_hm_exit() for us.
3469                  */
3470                 hm = hati_page_unmap(cur_pp, ht, entry);
3471                 if (hm != NULL)
3472                         hment_free(hm);
3473         }
3474 }
3475 
3476 int
3477 hat_pageunload(struct page *pp, uint_t forceflag)
3478 {
3479         ASSERT(PAGE_EXCL(pp));
3480         return (hati_pageunload(pp, 0, forceflag));
3481 }
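
/*
 * Illustrative note (not part of the driver): the next_size loop above
 * climbs to ever larger page sizes via PP_GROUPLEADER().  Conceptually the
 * group leader is the first constituent page_t of the szc-sized region
 * containing pp; assuming the constituent page_t's are contiguous, a
 * hypothetical restatement is:
 */
static page_t *
group_leader_of(page_t *pp, uint_t szc)
{
	pgcnt_t pgcnt = page_get_pagecnt(szc);

	/* back up to the entry whose pfn is aligned on the region size */
	return (pp - (pp->p_pagenum & (pgcnt - 1)));
}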
3482 
3483 /*
3484  * Unload all large mappings to pp and reduce by 1 p_szc field of every large
3485  * page level that included pp.
3486  *
3487  * pp must be locked EXCL. Even though no other constituent pages are locked
3488  * it's legal to unload large mappings to pp because all constituent pages of
3489  * large locked mappings have to be locked SHARED.  Therefore if we have an
3490  * EXCL lock on one of the constituent pages, none of the large mappings to
3491  * pp are locked.
3492  *
3493  * Change (always decrease) p_szc field starting from the last constituent
3494  * page and ending with root constituent page so that root's pszc always shows
3495  * the area where hat_page_demote() may be active.
3496  *
3497  * This mechanism is only used for file system pages where it's not always
3498  * possible to get EXCL locks on all constituent pages to demote the size code
3499  * (as is done for anonymous or kernel large pages).
3500  */
3501 void
3502 hat_page_demote(page_t *pp)
3503 {
3504         uint_t          pszc;
3505         uint_t          rszc;
3506         uint_t          szc;
3507         page_t          *rootpp;
3508         page_t          *firstpp;
3509         page_t          *lastpp;
3510         pgcnt_t         pgcnt;
3511 
3512         ASSERT(PAGE_EXCL(pp));
3513         ASSERT(!PP_ISFREE(pp));
3514         ASSERT(page_szc_lock_assert(pp));
3515 
3516         if (pp->p_szc == 0)
3517                 return;
3518 
3519         rootpp = PP_GROUPLEADER(pp, 1);
3520         (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3521 
3522         /*
3523          * All large mappings to pp are gone
3524          * and no new ones can be set up since pp is locked exclusively.
3525          *
3526          * Lock the root to make sure there's only one hat_page_demote()
3527          * outstanding within the area of this root's pszc.
3528          *
3529          * Second potential hat_page_demote() is already eliminated by upper
3530          * VM layer via page_szc_lock() but we don't rely on it and use our
3531          * own locking (so that upper layer locking can be changed without
3532          * assumptions that hat depends on upper layer VM to prevent multiple
3533          * hat_page_demote() to be issued simultaneously to the same large
3534          * page).
3535          */
3536 again:
3537         pszc = pp->p_szc;
3538         if (pszc == 0)
3539                 return;
3540         rootpp = PP_GROUPLEADER(pp, pszc);
3541         x86_hm_enter(rootpp);
3542         /*
3543          * If root's p_szc is different from pszc we raced with another
3544          * hat_page_demote().  Drop the lock and try to find the root again.
3545          * If root's p_szc is greater than pszc previous hat_page_demote() is
3546          * not done yet.  Take and release mlist lock of root's root to wait
3547          * for previous hat_page_demote() to complete.
3548          */
3549         if ((rszc = rootpp->p_szc) != pszc) {
3550                 x86_hm_exit(rootpp);
3551                 if (rszc > pszc) {
3552                         /* p_szc of a locked non free page can't increase */
3553                         ASSERT(pp != rootpp);
3554 
3555                         rootpp = PP_GROUPLEADER(rootpp, rszc);
3556                         x86_hm_enter(rootpp);
3557                         x86_hm_exit(rootpp);
3558                 }
3559                 goto again;
3560         }
3561         ASSERT(pp->p_szc == pszc);
3562 
3563         /*
3564          * Decrement by 1 p_szc of every constituent page of a region that
3565          * covered pp. For example if original szc is 3 it gets changed to 2
3566          * everywhere except in region 2 that covered pp. Region 2 that
3567          * covered pp gets demoted to 1 everywhere except in region 1 that
3568          * covered pp. The region 1 that covered pp is demoted to region
3569          * 0. It's done this way because from region 3 we removed level 3
3570          * mappings, from region 2 that covered pp we removed level 2 mappings
3571          * and from region 1 that covered pp we removed level 1 mappings.  All
3572          * changes are done from high pfn's to low pfn's so that roots
3573          * are changed last, allowing one to know the largest region where
3574          * hat_page_demote() is still active by only looking at the root page.
3575          *
3576          * This algorithm is implemented in 2 while loops. First loop changes
3577          * p_szc of pages to the right of pp's level 1 region and second
3578          * loop changes p_szc of pages of level 1 region that covers pp
3579          * and all pages to the left of level 1 region that covers pp.
3580          * In the first loop p_szc keeps dropping with every iteration
3581          * and in the second loop it keeps increasing with every iteration.
3582          *
3583          * First loop description: Demote pages to the right of pp outside of
3584          * level 1 region that covers pp.  In every iteration of the while
3585          * loop below find the last page of szc region and the first page of
3586          * (szc - 1) region that is immediately to the right of (szc - 1)
3587          * region that covers pp.  From last such page to first such page
3588          * change every page's szc to szc - 1. Decrement szc and continue
3589          * looping until szc is 1. If pp belongs to the last (szc - 1) region
3590          * of the szc region, skip to the next iteration.
3591          */
3592         szc = pszc;
3593         while (szc > 1) {
3594                 lastpp = PP_GROUPLEADER(pp, szc);
3595                 pgcnt = page_get_pagecnt(szc);
3596                 lastpp += pgcnt - 1;
3597                 firstpp = PP_GROUPLEADER(pp, (szc - 1));
3598                 pgcnt = page_get_pagecnt(szc - 1);
3599                 if (lastpp - firstpp < pgcnt) {
3600                         szc--;
3601                         continue;
3602                 }
3603                 firstpp += pgcnt;
3604                 while (lastpp != firstpp) {
3605                         ASSERT(lastpp->p_szc == pszc);
3606                         lastpp->p_szc = szc - 1;
3607                         lastpp--;
3608                 }
3609                 firstpp->p_szc = szc - 1;
3610                 szc--;
3611         }
3612 
3613         /*
3614          * Second loop description:
3615          * First iteration changes p_szc to 0 of every
3616          * page of level 1 region that covers pp.
3617          * Subsequent iterations find last page of szc region
3618          * immediately to the left of szc region that covered pp
3619          * and first page of (szc + 1) region that covers pp.
3620          * From last to first page change p_szc of every page to szc.
3621          * Increment szc and continue looping until szc is pszc.
3622          * If pp belongs to the first szc region of the (szc + 1) region
3623          * skip to the next iteration.
3624          *
3625          */
3626         szc = 0;
3627         while (szc < pszc) {
3628                 firstpp = PP_GROUPLEADER(pp, (szc + 1));
3629                 if (szc == 0) {
3630                         pgcnt = page_get_pagecnt(1);
3631                         lastpp = firstpp + (pgcnt - 1);
3632                 } else {
3633                         lastpp = PP_GROUPLEADER(pp, szc);
3634                         if (firstpp == lastpp) {
3635                                 szc++;
3636                                 continue;
3637                         }
3638                         lastpp--;
3639                         pgcnt = page_get_pagecnt(szc);
3640                 }
3641                 while (lastpp != firstpp) {
3642                         ASSERT(lastpp->p_szc == pszc);
3643                         lastpp->p_szc = szc;
3644                         lastpp--;
3645                 }
3646                 firstpp->p_szc = szc;
3647                 if (firstpp == rootpp)
3648                         break;
3649                 szc++;
3650         }
3651         x86_hm_exit(rootpp);
3652 }
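
/*
 * Worked example of the renumbering above, using hypothetical small sizes
 * where page_get_pagecnt(1) == 4, page_get_pagecnt(2) == 16 and
 * page_get_pagecnt(3) == 64.  Demoting a pszc == 3 large page via the
 * constituent page at index 21 leaves:
 *
 *	p_szc == 0 for indices 20..23  (the level 1 region covering pp)
 *	p_szc == 1 for indices 16..19 and 24..31  (rest of the level 2 region)
 *	p_szc == 2 for indices 0..15 and 32..63  (rest of the level 3 region)
 *
 * The first loop writes the 2's at indices 63..32 and the 1's at 31..24;
 * the second loop writes the 0's at 23..20, the remaining 1's at 19..16 and
 * the remaining 2's at 15..0, so the root page (index 0) is the very last
 * p_szc to change.
 */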
3653 
3654 /*
3655  * get hw stats from hardware into page struct and reset hw stats
3656  * returns attributes of page
3657  * Flags for hat_pagesync, hat_getstat, hat_sync
3658  *
3659  * define       HAT_SYNC_ZERORM         0x01
3660  *
3661  * Additional flags for hat_pagesync
3662  *
3663  * define       HAT_SYNC_STOPON_REF     0x02
3664  * define       HAT_SYNC_STOPON_MOD     0x04
3665  * define       HAT_SYNC_STOPON_RM      0x06
3666  * define       HAT_SYNC_STOPON_SHARED  0x08
3667  */
3668 uint_t
3669 hat_pagesync(struct page *pp, uint_t flags)
3670 {
3671         hment_t         *hm = NULL;
3672         htable_t        *ht;
3673         uint_t          entry;
3674         x86pte_t        old, save_old;
3675         x86pte_t        new;
3676         uchar_t         nrmbits = P_REF|P_MOD|P_RO;
3677         extern ulong_t  po_share;
3678         page_t          *save_pp = pp;
3679         uint_t          pszc = 0;
3680 
3681         ASSERT(PAGE_LOCKED(pp) || panicstr);
3682 
3683         if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3684                 return (pp->p_nrm & nrmbits);
3685 
3686         if ((flags & HAT_SYNC_ZERORM) == 0) {
3687 
3688                 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3689                         return (pp->p_nrm & nrmbits);
3690 
3691                 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3692                         return (pp->p_nrm & nrmbits);
3693 
3694                 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3695                     hat_page_getshare(pp) > po_share) {
3696                         if (PP_ISRO(pp))
3697                                 PP_SETREF(pp);
3698                         return (pp->p_nrm & nrmbits);
3699                 }
3700         }
3701 
3702         XPV_DISALLOW_MIGRATE();
3703 next_size:
3704         /*
3705          * walk thru the mapping list syncing (and clearing) ref/mod bits.
3706          */
3707         x86_hm_enter(pp);
3708         while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3709                 if (ht->ht_level < pszc)
3710                         continue;
3711                 old = x86pte_get(ht, entry);
3712 try_again:
3713 
3714                 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3715 
3716                 if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3717                         continue;
3718 
3719                 save_old = old;
3720                 if ((flags & HAT_SYNC_ZERORM) != 0) {
3721 
3722                         /*
3723                          * Need to clear ref or mod bits. Need to demap
3724                          * to make sure any executing TLBs see cleared bits.
3725                          */
3726                         new = old;
3727                         PTE_CLR(new, PT_REF | PT_MOD);
3728                         old = hati_update_pte(ht, entry, old, new);
3729                         if (old != 0)
3730                                 goto try_again;
3731 
3732                         old = save_old;
3733                 }
3734 
3735                 /*
3736                  * Sync the PTE
3737                  */
3738                 if (!(flags & HAT_SYNC_ZERORM) &&
3739                     PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
3740                         hati_sync_pte_to_page(pp, old, ht->ht_level);
3741 
3742                 /*
3743                  * can stop short if we found a ref'd or mod'd page
3744                  */
3745                 if (((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
3746                     ((flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) {
3747                         x86_hm_exit(pp);
3748                         goto done;
3749                 }
3750         }
3751         x86_hm_exit(pp);
3752         while (pszc < pp->p_szc) {
3753                 page_t *tpp;
3754                 pszc++;
3755                 tpp = PP_GROUPLEADER(pp, pszc);
3756                 if (pp != tpp) {
3757                         pp = tpp;
3758                         goto next_size;
3759                 }
3760         }
3761 done:
3762         XPV_ALLOW_MIGRATE();
3763         return (save_pp->p_nrm & nrmbits);
3764 }
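
/*
 * Hypothetical usage sketches for the flags above (illustration only):
 * a caller sampling reference state without disturbing it can use a
 * STOPON flag to short-circuit the mapping list walk, while pageout-style
 * code clears the hardware bits as it samples them with HAT_SYNC_ZERORM.
 */
static int
page_recently_referenced(page_t *pp)
{
	return ((hat_pagesync(pp, HAT_SYNC_STOPON_REF) & P_REF) != 0);
}

static uint_t
page_sample_and_clear_rm(page_t *pp)
{
	return (hat_pagesync(pp, HAT_SYNC_ZERORM) & (P_REF | P_MOD));
}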
3765 
3766 /*
3767  * Returns the approximate number of mappings to this pp.  A return of 0
3768  * implies there are no mappings to the page.
3769  */
3770 ulong_t
3771 hat_page_getshare(page_t *pp)
3772 {
3773         uint_t cnt;
3774         cnt = hment_mapcnt(pp);
3775 #if defined(__amd64)
3776         if (vpm_enable && pp->p_vpmref) {
3777                 cnt += 1;
3778         }
3779 #endif
3780         return (cnt);
3781 }
3782 
3783 /*
3784  * Return 1 if the number of mappings exceeds sh_thresh. Return 0
3785  * otherwise.
3786  */
3787 int
3788 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3789 {
3790         return (hat_page_getshare(pp) > sh_thresh);
3791 }
3792 
3793 /*
3794  * hat_softlock isn't supported anymore
3795  */
3796 /*ARGSUSED*/
3797 faultcode_t
3798 hat_softlock(
3799         hat_t *hat,
3800         caddr_t addr,
3801         size_t *len,
3802         struct page **page_array,
3803         uint_t flags)
3804 {
3805         return (FC_NOSUPPORT);
3806 }
3807 
3808 
3809 
3810 /*
3811  * Routine to expose supported HAT features to platform independent code.
3812  */
3813 /*ARGSUSED*/
3814 int
3815 hat_supported(enum hat_features feature, void *arg)
3816 {
3817         switch (feature) {
3818 
3819         case HAT_SHARED_PT:     /* this is really ISM */
3820                 return (1);
3821 
3822         case HAT_DYNAMIC_ISM_UNMAP:
3823                 return (0);
3824 
3825         case HAT_VMODSORT:
3826                 return (1);
3827 
3828         case HAT_SHARED_REGIONS:
3829                 return (0);
3830 
3831         default:
3832                 panic("hat_supported() - unknown feature");
3833         }
3834         return (0);
3835 }
3836 
3837 /*
3838  * Called when a thread is exiting and has been switched to the kernel AS
3839  */
3840 void
3841 hat_thread_exit(kthread_t *thd)
3842 {
3843         ASSERT(thd->t_procp->p_as == &kas);
3844         XPV_DISALLOW_MIGRATE();
3845         hat_switch(thd->t_procp->p_as->a_hat);
3846         XPV_ALLOW_MIGRATE();
3847 }
3848 
3849 /*
3850  * Set up the given brand new hat structure as the new HAT on this cpu's mmu.
3851  */
3852 /*ARGSUSED*/
3853 void
3854 hat_setup(hat_t *hat, int flags)
3855 {
3856         XPV_DISALLOW_MIGRATE();
3857         kpreempt_disable();
3858 
3859         hat_switch(hat);
3860 
3861         kpreempt_enable();
3862         XPV_ALLOW_MIGRATE();
3863 }
3864 
3865 /*
3866  * Prepare for a CPU private mapping for the given address.
3867  *
3868  * The address can only be used from a single CPU and can be remapped
3869  * using hat_mempte_remap().  Return the physical address of the PTE.
3870  *
3871  * We do the htable_create() if necessary and increment the valid count so
3872  * the htable can't disappear.  We also hat_devload() the page table into
3873  * kernel so that the PTE is quickly accessed.
3874  */
3875 hat_mempte_t
3876 hat_mempte_setup(caddr_t addr)
3877 {
3878         uintptr_t       va = (uintptr_t)addr;
3879         htable_t        *ht;
3880         uint_t          entry;
3881         x86pte_t        oldpte;
3882         hat_mempte_t    p;
3883 
3884         ASSERT(IS_PAGEALIGNED(va));
3885         ASSERT(!IN_VA_HOLE(va));
3886         ++curthread->t_hatdepth;
3887         XPV_DISALLOW_MIGRATE();
3888         ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3889         if (ht == NULL) {
3890                 ht = htable_create(kas.a_hat, va, 0, NULL);
3891                 entry = htable_va2entry(va, ht);
3892                 ASSERT(ht->ht_level == 0);
3893                 oldpte = x86pte_get(ht, entry);
3894         }
3895         if (PTE_ISVALID(oldpte))
3896                 panic("hat_mempte_setup(): address already mapped "
3897                     "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
3898 
3899         /*
3900          * increment ht_valid_cnt so that the pagetable can't disappear
3901          */
3902         HTABLE_INC(ht->ht_valid_cnt);
3903 
3904         /*
3905          * return the PTE physical address to the caller.
3906          */
3907         htable_release(ht);
3908         XPV_ALLOW_MIGRATE();
3909         p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3910         --curthread->t_hatdepth;
3911         return (p);
3912 }
3913 
3914 /*
3915  * Release a CPU private mapping for the given address.
3916  * We decrement the htable valid count so it might be destroyed.
3917  */
3918 /*ARGSUSED1*/
3919 void
3920 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
3921 {
3922         htable_t        *ht;
3923 
3924         XPV_DISALLOW_MIGRATE();
3925         /*
3926          * invalidate any left over mapping and decrement the htable valid count
3927          */
3928 #ifdef __xpv
3929         if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
3930             UVMF_INVLPG | UVMF_LOCAL))
3931                 panic("HYPERVISOR_update_va_mapping() failed");
3932 #else
3933         {
3934                 x86pte_t *pteptr;
3935 
3936                 pteptr = x86pte_mapin(mmu_btop(pte_pa),
3937                     (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3938                 if (mmu.pae_hat)
3939                         *pteptr = 0;
3940                 else
3941                         *(x86pte32_t *)pteptr = 0;
3942                 mmu_tlbflush_entry(addr);
3943                 x86pte_mapout();
3944         }
3945 #endif
3946 
3947         ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3948         if (ht == NULL)
3949                 panic("hat_mempte_release(): invalid address");
3950         ASSERT(ht->ht_level == 0);
3951         HTABLE_DEC(ht->ht_valid_cnt);
3952         htable_release(ht);
3953         XPV_ALLOW_MIGRATE();
3954 }
3955 
3956 /*
3957  * Apply a temporary CPU private mapping to a page. We flush the TLB only
3958  * on this CPU, so it ought to be called with preemption disabled.
3959  */
3960 void
3961 hat_mempte_remap(
3962         pfn_t           pfn,
3963         caddr_t         addr,
3964         hat_mempte_t    pte_pa,
3965         uint_t          attr,
3966         uint_t          flags)
3967 {
3968         uintptr_t       va = (uintptr_t)addr;
3969         x86pte_t        pte;
3970 
3971         /*
3972          * Remap the given PTE to the new page's PFN. Invalidate only
3973          * on this CPU.
3974          */
3975 #ifdef DEBUG
3976         htable_t        *ht;
3977         uint_t          entry;
3978 
3979         ASSERT(IS_PAGEALIGNED(va));
3980         ASSERT(!IN_VA_HOLE(va));
3981         ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
3982         ASSERT(ht != NULL);
3983         ASSERT(ht->ht_level == 0);
3984         ASSERT(ht->ht_valid_cnt > 0);
3985         ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
3986         htable_release(ht);
3987 #endif
3988         XPV_DISALLOW_MIGRATE();
3989         pte = hati_mkpte(pfn, attr, 0, flags);
3990 #ifdef __xpv
3991         if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
3992                 panic("HYPERVISOR_update_va_mapping() failed");
3993 #else
3994         {
3995                 x86pte_t *pteptr;
3996 
3997                 pteptr = x86pte_mapin(mmu_btop(pte_pa),
3998                     (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3999                 if (mmu.pae_hat)
4000                         *(x86pte_t *)pteptr = pte;
4001                 else
4002                         *(x86pte32_t *)pteptr = (x86pte32_t)pte;
4003                 mmu_tlbflush_entry(addr);
4004                 x86pte_mapout();
4005         }
4006 #endif
4007         XPV_ALLOW_MIGRATE();
4008 }
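
/*
 * Hypothetical lifecycle sketch for the hat_mempte_*() interfaces above
 * (illustration only; window_va is an imagined page-aligned kernel VA owned
 * by the caller, and the attr/flags values shown are representative, not
 * prescriptive).  Setup is done once, remapping is cheap because only the
 * local TLB entry is flushed, and release drops the valid count hold taken
 * at setup.
 */
static void
mempte_window_example(caddr_t window_va, pfn_t pfn1, pfn_t pfn2)
{
	hat_mempte_t pte_pa;

	pte_pa = hat_mempte_setup(window_va);

	kpreempt_disable();	/* remap flushes only this CPU's TLB */
	hat_mempte_remap(pfn1, window_va, pte_pa,
	    PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
	/* ... access pfn1's page through window_va ... */
	hat_mempte_remap(pfn2, window_va, pte_pa,
	    PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
	/* ... access pfn2's page through window_va ... */
	kpreempt_enable();

	hat_mempte_release(window_va, pte_pa);
}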
4009 
4010 
4011 
4012 /*
4013  * Hat locking functions
4014  * XXX - these two functions are currently being used by hatstats
4015  *      they can be removed by using a per-as mutex for hatstats.
4016  */
4017 void
4018 hat_enter(hat_t *hat)
4019 {
4020         mutex_enter(&hat->hat_mutex);
4021 }
4022 
4023 void
4024 hat_exit(hat_t *hat)
4025 {
4026         mutex_exit(&hat->hat_mutex);
4027 }
4028 
4029 /*
4030  * HAT part of cpu initialization.
4031  */
4032 void
4033 hat_cpu_online(struct cpu *cpup)
4034 {
4035         if (cpup != CPU) {
4036                 x86pte_cpu_init(cpup);
4037                 hat_vlp_setup(cpup);
4038         }
4039         CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
4040 }
4041 
4042 /*
4043  * HAT part of cpu deletion.
4044  * (currently, we only call this after the cpu is safely passivated.)
4045  */
4046 void
4047 hat_cpu_offline(struct cpu *cpup)
4048 {
4049         ASSERT(cpup != CPU);
4050 
4051         CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
4052         hat_vlp_teardown(cpup);
4053         x86pte_cpu_fini(cpup);
4054 }
4055 
4056 /*
4057  * Function called after all CPUs are brought online.
4058  * Used to remove low address boot mappings.
4059  */
4060 void
4061 clear_boot_mappings(uintptr_t low, uintptr_t high)
4062 {
4063         uintptr_t vaddr = low;
4064         htable_t *ht = NULL;
4065         level_t level;
4066         uint_t entry;
4067         x86pte_t pte;
4068 
4069         /*
4070          * On the 1st CPU we can unload the prom mappings; basically we blow away
4071          * all virtual mappings under _userlimit.
4072          */
4073         while (vaddr < high) {
4074                 pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
4075                 if (ht == NULL)
4076                         break;
4077 
4078                 level = ht->ht_level;
4079                 entry = htable_va2entry(vaddr, ht);
4080                 ASSERT(level <= mmu.max_page_level);
4081                 ASSERT(PTE_ISPAGE(pte, level));
4082 
4083                 /*
4084                  * Unload the mapping from the page tables.
4085                  */
4086                 (void) x86pte_inval(ht, entry, 0, NULL, B_TRUE);
4087                 ASSERT(ht->ht_valid_cnt > 0);
4088                 HTABLE_DEC(ht->ht_valid_cnt);
4089                 PGCNT_DEC(ht->ht_hat, ht->ht_level);
4090 
4091                 vaddr += LEVEL_SIZE(ht->ht_level);
4092         }
4093         if (ht)
4094                 htable_release(ht);
4095 }
4096 
4097 /*
4098  * Atomically update a new translation for a single page.  If the
4099  * currently installed PTE doesn't match the value we expect to find,
4100  * it's not updated and we return the PTE we found.
4101  *
4102  * If activating nosync or NOWRITE and the page was modified we need to sync
4103  * with the page_t. Also sync with page_t if clearing ref/mod bits.
4104  */
4105 static x86pte_t
4106 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
4107 {
4108         page_t          *pp;
4109         uint_t          rm = 0;
4110         x86pte_t        replaced;
4111 
4112         if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
4113             PTE_GET(expected, PT_MOD | PT_REF) &&
4114             (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
4115             !PTE_GET(new, PT_MOD | PT_REF))) {
4116 
4117                 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
4118                 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
4119                 ASSERT(pp != NULL);
4120                 if (PTE_GET(expected, PT_MOD))
4121                         rm |= P_MOD;
4122                 if (PTE_GET(expected, PT_REF))
4123                         rm |= P_REF;
4124                 PTE_CLR(new, PT_MOD | PT_REF);
4125         }
4126 
4127         replaced = x86pte_update(ht, entry, expected, new);
4128         if (replaced != expected)
4129                 return (replaced);
4130 
4131         if (rm) {
4132                 /*
4133                  * sync to all constituent pages of a large page
4134                  */
4135                 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
4136                 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
4137                 while (pgcnt-- > 0) {
4138                         /*
4139                          * hat_page_demote() can't decrease
4140                          * pszc below this mapping size
4141                          * since large mapping existed after we
4142                          * took mlist lock.
4143                          */
4144                         ASSERT(pp->p_szc >= ht->ht_level);
4145                         hat_page_setattr(pp, rm);
4146                         ++pp;
4147                 }
4148         }
4149 
4150         return (0);
4151 }
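
/*
 * Sketch of the retry idiom used by hati_update_pte() callers (compare
 * hati_page_clrwrt() and hat_pagesync() above): on a lost race the routine
 * hands back the PTE it actually found, which becomes the expected value
 * for the next attempt.  `bits` is a hypothetical mask of PTE bits to clear.
 */
static void
pte_clear_bits_example(htable_t *ht, uint_t entry, x86pte_t bits)
{
	x86pte_t old, new;

	old = x86pte_get(ht, entry);
	for (;;) {
		if (PTE_GET(old, bits) == 0)
			break;			/* nothing left to clear */
		new = old;
		PTE_CLR(new, bits);
		old = hati_update_pte(ht, entry, old, new);
		if (old == 0)
			break;			/* update succeeded */
		/* raced with another update; old is the current PTE */
	}
}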
4152 
4153 /* ARGSUSED */
4154 void
4155 hat_join_srd(struct hat *hat, vnode_t *evp)
4156 {
4157 }
4158 
4159 /* ARGSUSED */
4160 hat_region_cookie_t
4161 hat_join_region(struct hat *hat,
4162     caddr_t r_saddr,
4163     size_t r_size,
4164     void *r_obj,
4165     u_offset_t r_objoff,
4166     uchar_t r_perm,
4167     uchar_t r_pgszc,
4168     hat_rgn_cb_func_t r_cb_function,
4169     uint_t flags)
4170 {
4171         panic("No shared region support on x86");
4172         return (HAT_INVALID_REGION_COOKIE);
4173 }
4174 
4175 /* ARGSUSED */
4176 void
4177 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4178 {
4179         panic("No shared region support on x86");
4180 }
4181 
4182 /* ARGSUSED */
4183 void
4184 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4185 {
4186         panic("No shared region support on x86");
4187 }
4188 
4189 
4190 /*
4191  * Kernel Physical Mapping (kpm) facility
4192  *
4193  * Most of the routines needed to support segkpm are almost no-ops on the
4194  * x86 platform.  We map in the entire segment when it is created and leave
4195  * it mapped in, so there is no additional work required to set up and tear
4196  * down individual mappings.  All of these routines were created to support
4197  * SPARC platforms that have to avoid aliasing in their virtually indexed
4198  * caches.
4199  *
4200  * Most of the routines have sanity checks in them (e.g. verifying that the
4201  * passed-in page is locked).  We don't actually care about most of these
4202  * checks on x86, but we leave them in place to identify problems in the
4203  * upper levels.
4204  */
4205 
4206 /*
4207  * Map in a locked page and return the vaddr.
4208  */
4209 /*ARGSUSED*/
4210 caddr_t
4211 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
4212 {
4213         caddr_t         vaddr;
4214 
4215 #ifdef DEBUG
4216         if (kpm_enable == 0) {
4217                 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
4218                 return ((caddr_t)NULL);
4219         }
4220 
4221         if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4222                 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
4223                 return ((caddr_t)NULL);
4224         }
4225 #endif
4226 
4227         vaddr = hat_kpm_page2va(pp, 1);
4228 
4229         return (vaddr);
4230 }
4231 
4232 /*
4233  * Mapout a locked page.
4234  */
4235 /*ARGSUSED*/
4236 void
4237 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
4238 {
4239 #ifdef DEBUG
4240         if (kpm_enable == 0) {
4241                 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
4242                 return;
4243         }
4244 
4245         if (IS_KPM_ADDR(vaddr) == 0) {
4246                 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
4247                 return;
4248         }
4249 
4250         if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4251                 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
4252                 return;
4253         }
4254 #endif
4255 }
4256 
4257 /*
4258  * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
4259  * memory addresses that are not described by a page_t.  It can
4260  * also be used for normal pages that are not locked, but beware
4261  * this is dangerous - no locking is performed, so the identity of
4262  * the page could change.  hat_kpm_mapin_pfn is not supported when
4263  * vac_colors > 1, because the chosen va depends on the page identity,
4264  * which could change.
4265  * The caller must only pass pfn's for valid physical addresses; violating
4266  * this rule will cause a panic.
4267  */
4268 caddr_t
4269 hat_kpm_mapin_pfn(pfn_t pfn)
4270 {
4271         uintptr_t paddr, vaddr;
4272 
4273         if (kpm_enable == 0)
4274                 return ((caddr_t)NULL);
4275 
4276         paddr = (uintptr_t)ptob(pfn);
4277         vaddr = (uintptr_t)kpm_vbase + paddr;
4278 
4279         return ((caddr_t)vaddr);
4280 }
4281 
4282 /*ARGSUSED*/
4283 void
4284 hat_kpm_mapout_pfn(pfn_t pfn)
4285 {
4286         /* empty */
4287 }
4288 
4289 /*
4290  * Return the kpm virtual address for a specific pfn
4291  */
4292 caddr_t
4293 hat_kpm_pfn2va(pfn_t pfn)
4294 {
4295         uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
4296 
4297         ASSERT(!pfn_is_foreign(pfn));
4298         return ((caddr_t)vaddr);
4299 }
4300 
4301 /*
4302  * Return the kpm virtual address for the page at pp.
4303  */
4304 /*ARGSUSED*/
4305 caddr_t
4306 hat_kpm_page2va(struct page *pp, int checkswap)
4307 {
4308         return (hat_kpm_pfn2va(pp->p_pagenum));
4309 }
4310 
4311 /*
4312  * Return the page frame number for the kpm virtual address vaddr.
4313  */
4314 pfn_t
4315 hat_kpm_va2pfn(caddr_t vaddr)
4316 {
4317         pfn_t           pfn;
4318 
4319         ASSERT(IS_KPM_ADDR(vaddr));
4320 
4321         pfn = (pfn_t)btop(vaddr - kpm_vbase);
4322 
4323         return (pfn);
4324 }
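
/*
 * Since segkpm on x86 is a single linear mapping, the pfn/va translation is
 * pure arithmetic and invertible.  An illustrative (hypothetical) sanity
 * check:
 */
static int
kpm_round_trip_ok(pfn_t pfn)
{
	return (hat_kpm_va2pfn(hat_kpm_pfn2va(pfn)) == pfn);
}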
4325 
4326 
4327 /*
4328  * Return the page for the kpm virtual address vaddr.
4329  */
4330 page_t *
4331 hat_kpm_vaddr2page(caddr_t vaddr)
4332 {
4333         pfn_t           pfn;
4334 
4335         ASSERT(IS_KPM_ADDR(vaddr));
4336 
4337         pfn = hat_kpm_va2pfn(vaddr);
4338 
4339         return (page_numtopp_nolock(pfn));
4340 }
4341 
4342 /*
4343  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4344  * KPM page.  This should never happen on x86.
4345  */
4346 int
4347 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4348 {
4349         panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
4350             (void *)hat, (void *)vaddr);
4351 
4352         return (0);
4353 }
4354 
4355 /*ARGSUSED*/
4356 void
4357 hat_kpm_mseghash_clear(int nentries)
4358 {}
4359 
4360 /*ARGSUSED*/
4361 void
4362 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4363 {}
4364 
4365 #ifndef __xpv
4366 void
4367 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
4368         offset_t kpm_pages_off)
4369 {
4370         _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
4371         pfn_t base, end;
4372 
4373         /*
4374          * kphysm_add_memory_dynamic() does not set nkpmpgs
4375          * when page_t memory is externally allocated.  That
4376          * code must properly calculate nkpmpgs in all cases
4377          * if nkpmpgs needs to be used at some point.
4378          */
4379 
4380         /*
4381          * The meta (page_t) pages for dynamically added memory are allocated
4382          * either from the incoming memory itself or from existing memory.
4383          * In the former case the base of the incoming pages will be different
4384  * from the base of the dynamic segment, so call memseg_get_start() to
4385          * get the actual base of the incoming memory for each case.
4386          */
4387 
4388         base = memseg_get_start(msp);
4389         end = msp->pages_end;
4390 
4391         hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
4392             mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
4393             HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
4394 }
4395 
4396 void
4397 hat_kpm_addmem_mseg_insert(struct memseg *msp)
4398 {
4399         _NOTE(ARGUNUSED(msp));
4400 }
4401 
4402 void
4403 hat_kpm_addmem_memsegs_update(struct memseg *msp)
4404 {
4405         _NOTE(ARGUNUSED(msp));
4406 }
4407 
4408 /*
4409  * Return end of metadata for an already setup memseg.
4410  * X86 platforms don't need per-page meta data to support kpm.
4411  */
4412 caddr_t
4413 hat_kpm_mseg_reuse(struct memseg *msp)
4414 {
4415         return ((caddr_t)msp->epages);
4416 }
4417 
4418 void
4419 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
4420 {
4421         _NOTE(ARGUNUSED(msp, mspp));
4422         ASSERT(0);
4423 }
4424 
4425 void
4426 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
4427         struct memseg *lo, struct memseg *mid, struct memseg *hi)
4428 {
4429         _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
4430         ASSERT(0);
4431 }
4432 
4433 /*
4434  * Walk the memsegs chain, applying func to each memseg span.
4435  */
4436 void
4437 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
4438 {
4439         pfn_t   pbase, pend;
4440         void    *base;
4441         size_t  size;
4442         struct memseg *msp;
4443 
4444         for (msp = memsegs; msp; msp = msp->next) {
4445                 pbase = msp->pages_base;
4446                 pend = msp->pages_end;
4447                 base = ptob(pbase) + kpm_vbase;
4448                 size = ptob(pend - pbase);
4449                 func(arg, base, size);
4450         }
4451 }
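
/*
 * Example use of hat_kpm_walk() (hypothetical, for illustration only):
 * total up the bytes of physical memory reachable through segkpm.
 */
static void
kpm_add_span(void *arg, void *base, size_t size)
{
	_NOTE(ARGUNUSED(base));
	*(size_t *)arg += size;
}

static size_t
kpm_total_bytes(void)
{
	size_t total = 0;

	hat_kpm_walk(kpm_add_span, &total);
	return (total);
}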
4452 
4453 #else   /* __xpv */
4454 
4455 /*
4456  * There are specific Hypervisor calls to establish and remove mappings
4457  * to grant table references and the privcmd driver. We have to ensure
4458  * that a page table actually exists.
4459  */
4460 void
4461 hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
4462 {
4463         maddr_t base_ma;
4464         htable_t *ht;
4465         uint_t entry;
4466 
4467         ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4468         XPV_DISALLOW_MIGRATE();
4469         ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
4470 
4471         /*
4472          * if an address for pte_ma is passed in, return the MA of the pte
4473          * for this specific address.  This address is only valid as long
4474          * as the htable stays locked.
4475          */
4476         if (pte_ma != NULL) {
4477                 entry = htable_va2entry((uintptr_t)addr, ht);
4478                 base_ma = pa_to_ma(ptob(ht->ht_pfn));
4479                 *pte_ma = base_ma + (entry << mmu.pte_size_shift);
4480         }
4481         XPV_ALLOW_MIGRATE();
4482 }
4483 
4484 void
4485 hat_release_mapping(hat_t *hat, caddr_t addr)
4486 {
4487         htable_t *ht;
4488 
4489         ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4490         XPV_DISALLOW_MIGRATE();
4491         ht = htable_lookup(hat, (uintptr_t)addr, 0);
4492         ASSERT(ht != NULL);
4493         ASSERT(ht->ht_busy >= 2);
4494         htable_release(ht);
4495         htable_release(ht);
4496         XPV_ALLOW_MIGRATE();
4497 }
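
/*
 * Hypothetical pairing sketch (illustration only): a grant-mapping caller
 * brackets its hypervisor call with the two routines above, so the
 * pagetable exists when the hypervisor writes the PTE and the extra htable
 * hold is dropped afterwards.
 */
static void
grant_map_example(hat_t *hat, caddr_t addr)
{
	uint64_t pte_ma;

	hat_prepare_mapping(hat, addr, &pte_ma);
	/*
	 * ... issue the mapping hypercall (e.g. GNTTABOP_map_grant_ref)
	 * targeting pte_ma here ...
	 */
	hat_release_mapping(hat, addr);
}
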
4498 #endif  /* __xpv */