1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 /*
  22  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  23  */
  24 /*
  25  * Copyright (c) 2010, Intel Corporation.
  26  * All rights reserved.
  27  */
  28 /*
  29  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
  30  * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
  31  */
  32 
  33 /*
  34  * VM - Hardware Address Translation management for i386 and amd64
  35  *
  36  * Implementation of the interfaces described in <common/vm/hat.h>
  37  *
  38  * Nearly all the details of how the hardware is managed should not be
  39  * visible outside this layer except for misc. machine specific functions
  40  * that work in conjunction with this code.
  41  *
  42  * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
  43  */
  44 
  45 #include <sys/machparam.h>
  46 #include <sys/machsystm.h>
  47 #include <sys/mman.h>
  48 #include <sys/types.h>
  49 #include <sys/systm.h>
  50 #include <sys/cpuvar.h>
  51 #include <sys/thread.h>
  52 #include <sys/proc.h>
  53 #include <sys/cpu.h>
  54 #include <sys/kmem.h>
  55 #include <sys/disp.h>
  56 #include <sys/shm.h>
  57 #include <sys/sysmacros.h>
  58 #include <sys/machparam.h>
  59 #include <sys/vmem.h>
  60 #include <sys/vmsystm.h>
  61 #include <sys/promif.h>
  62 #include <sys/var.h>
  63 #include <sys/x86_archext.h>
  64 #include <sys/atomic.h>
  65 #include <sys/bitmap.h>
  66 #include <sys/controlregs.h>
  67 #include <sys/bootconf.h>
  68 #include <sys/bootsvcs.h>
  69 #include <sys/bootinfo.h>
  70 #include <sys/archsystm.h>
  71 
  72 #include <vm/seg_kmem.h>
  73 #include <vm/hat_i86.h>
  74 #include <vm/as.h>
  75 #include <vm/seg.h>
  76 #include <vm/page.h>
  77 #include <vm/seg_kp.h>
  78 #include <vm/seg_kpm.h>
  79 #include <vm/vm_dep.h>
  80 #ifdef __xpv
  81 #include <sys/hypervisor.h>
  82 #endif
  83 #include <vm/kboot_mmu.h>
  84 #include <vm/seg_spt.h>
  85 
  86 #include <sys/cmn_err.h>
  87 
  88 /*
  89  * Basic parameters for hat operation.
  90  */
  91 struct hat_mmu_info mmu;
  92 
  93 /*
  94  * The page that is the kernel's top level pagetable.
  95  *
  96  * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
  97  * on this 4K page for its top level page table. The remaining groups of
  98  * 4 entries are used for per processor copies of user VLP pagetables for
  99  * running threads.  See hat_switch() and reload_pae32() for details.
 100  *
 101  * vlp_page[0..3] - level==2 PTEs for kernel HAT
 102  * vlp_page[4..7] - level==2 PTEs for user thread on cpu 0
 103  * vlp_page[8..11]  - level==2 PTE for user thread on cpu 1
 104  * etc...
 105  */
 106 static x86pte_t *vlp_page;
 107 
 108 /*
 109  * forward declaration of internal utility routines
 110  */
 111 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
 112         x86pte_t new);
 113 
 114 /*
 115  * The kernel address space exists in all HATs. To implement this the
 116  * kernel reserves a fixed number of entries in the topmost level(s) of page
 117  * tables. The values are setup during startup and then copied to every user
 118  * hat created by hat_alloc(). This means that kernelbase must be:
 119  *
 120  *        4Meg aligned for 32 bit kernels
 121  *      512Gig aligned for x86_64 64 bit kernel
 122  *
 123  * The hat_kernel_range_ts describe what needs to be copied from kernel hat
 124  * to each user hat.
 125  */
 126 typedef struct hat_kernel_range {
 127         level_t         hkr_level;
 128         uintptr_t       hkr_start_va;
 129         uintptr_t       hkr_end_va;     /* zero means to end of memory */
 130 } hat_kernel_range_t;
 131 #define NUM_KERNEL_RANGE 2
 132 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
 133 static int num_kernel_ranges;
 134 
 135 uint_t use_boot_reserve = 1;    /* cleared after early boot process */
 136 uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */
 137 
 138 /*
 139  * enable_1gpg: controls 1g page support for user applications.
 140  * By default, 1g pages are exported to user applications. enable_1gpg can
 141  * be set to 0 to not export.
 142  */
 143 int     enable_1gpg = 1;
 144 
 145 /*
 146  * AMD shanghai processors provide better management of 1gb ptes in its tlb.
 147  * By default, 1g page support will be disabled for pre-shanghai AMD
 148  * processors that don't have optimal tlb support for the 1g page size.
 149  * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
 150  * processors.
 151  */
 152 int     chk_optimal_1gtlb = 1;
 153 
 154 
 155 #ifdef DEBUG
 156 uint_t  map1gcnt;
 157 #endif
 158 
 159 
 160 /*
 161  * A cpuset for all cpus. This is used for kernel address cross calls, since
 162  * the kernel addresses apply to all cpus.
 163  */
 164 cpuset_t khat_cpuset;
 165 
 166 /*
 167  * management stuff for hat structures
 168  */
 169 kmutex_t        hat_list_lock;
 170 kcondvar_t      hat_list_cv;
 171 kmem_cache_t    *hat_cache;
 172 kmem_cache_t    *hat_hash_cache;
 173 kmem_cache_t    *vlp_hash_cache;
 174 
 175 /*
 176  * Simple statistics
 177  */
 178 struct hatstats hatstat;
 179 
 180 /*
 181  * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
 182  * correctly.  For such hypervisors we must set PT_USER for kernel
 183  * entries ourselves (normally the emulation would set PT_USER for
 184  * kernel entries and PT_USER|PT_GLOBAL for user entries).  pt_kern is
 185  * thus set appropriately.  Note that dboot/kbm is OK, as only the full
 186  * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
 187  * incorrect.
 188  */
 189 int pt_kern;
 190 
 191 /*
 192  * useful stuff for atomic access/clearing/setting REF/MOD/RO bits in page_t's.
 193  */
 194 extern void atomic_orb(uchar_t *addr, uchar_t val);
 195 extern void atomic_andb(uchar_t *addr, uchar_t val);
 196 
 197 #ifndef __xpv
 198 extern pfn_t memseg_get_start(struct memseg *);
 199 #endif
 200 
 201 #define PP_GETRM(pp, rmmask)    (pp->p_nrm & rmmask)
 202 #define PP_ISMOD(pp)            PP_GETRM(pp, P_MOD)
 203 #define PP_ISREF(pp)            PP_GETRM(pp, P_REF)
 204 #define PP_ISRO(pp)             PP_GETRM(pp, P_RO)
 205 
 206 #define PP_SETRM(pp, rm)        atomic_orb(&(pp->p_nrm), rm)
 207 #define PP_SETMOD(pp)           PP_SETRM(pp, P_MOD)
 208 #define PP_SETREF(pp)           PP_SETRM(pp, P_REF)
 209 #define PP_SETRO(pp)            PP_SETRM(pp, P_RO)
 210 
 211 #define PP_CLRRM(pp, rm)        atomic_andb(&(pp->p_nrm), ~(rm))
 212 #define PP_CLRMOD(pp)           PP_CLRRM(pp, P_MOD)
 213 #define PP_CLRREF(pp)           PP_CLRRM(pp, P_REF)
 214 #define PP_CLRRO(pp)            PP_CLRRM(pp, P_RO)
 215 #define PP_CLRALL(pp)           PP_CLRRM(pp, P_MOD | P_REF | P_RO)
 216 
 217 /*
 218  * kmem cache constructor for struct hat
 219  */
 220 /*ARGSUSED*/
 221 static int
 222 hati_constructor(void *buf, void *handle, int kmflags)
 223 {
 224         hat_t   *hat = buf;
 225 
 226         mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 227         bzero(hat->hat_pages_mapped,
 228             sizeof (pgcnt_t) * (mmu.max_page_level + 1));
 229         hat->hat_ism_pgcnt = 0;
 230         hat->hat_stats = 0;
 231         hat->hat_flags = 0;
 232         CPUSET_ZERO(hat->hat_cpus);
 233         hat->hat_htable = NULL;
 234         hat->hat_ht_hash = NULL;
 235         return (0);
 236 }
 237 
 238 /*
 239  * Allocate a hat structure for as. We also create the top level
 240  * htable and initialize it to contain the kernel hat entries.
 241  */
 242 hat_t *
 243 hat_alloc(struct as *as)
 244 {
 245         hat_t                   *hat;
 246         htable_t                *ht;    /* top level htable */
 247         uint_t                  use_vlp;
 248         uint_t                  r;
 249         hat_kernel_range_t      *rp;
 250         uintptr_t               va;
 251         uintptr_t               eva;
 252         uint_t                  start;
 253         uint_t                  cnt;
 254         htable_t                *src;
 255 
 256         /*
 257          * Once we start creating user process HATs we can enable
 258          * the htable_steal() code.
 259          */
 260         if (can_steal_post_boot == 0)
 261                 can_steal_post_boot = 1;
 262 
 263         ASSERT(AS_WRITE_HELD(as));
 264         hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
 265         hat->hat_as = as;
 266         mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 267         ASSERT(hat->hat_flags == 0);
 268 
 269 #if defined(__xpv)
 270         /*
 271          * No VLP stuff on the hypervisor due to the 64-bit split top level
 272          * page tables.  On 32-bit it's not needed as the hypervisor takes
 273          * care of copying the top level PTEs to a below 4Gig page.
 274          */
 275         use_vlp = 0;
 276 #else   /* __xpv */
 277         /* 32 bit processes uses a VLP style hat when running with PAE */
 278 #if defined(__amd64)
 279         use_vlp = (ttoproc(curthread)->p_model == DATAMODEL_ILP32);
 280 #elif defined(__i386)
 281         use_vlp = mmu.pae_hat;
 282 #endif
 283 #endif  /* __xpv */
 284         if (use_vlp) {
 285                 hat->hat_flags = HAT_VLP;
 286                 bzero(hat->hat_vlp_ptes, VLP_SIZE);
 287         }
 288 
 289         /*
 290          * Allocate the htable hash
 291          */
 292         if ((hat->hat_flags & HAT_VLP)) {
 293                 hat->hat_num_hash = mmu.vlp_hash_cnt;
 294                 hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
 295         } else {
 296                 hat->hat_num_hash = mmu.hash_cnt;
 297                 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
 298         }
 299         bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
 300 
 301         /*
 302          * Initialize Kernel HAT entries at the top of the top level page
 303          * tables for the new hat.
 304          */
 305         hat->hat_htable = NULL;
 306         hat->hat_ht_cached = NULL;
 307         XPV_DISALLOW_MIGRATE();
 308         ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
 309         hat->hat_htable = ht;
 310 
 311 #if defined(__amd64)
 312         if (hat->hat_flags & HAT_VLP)
 313                 goto init_done;
 314 #endif
 315 
 316         for (r = 0; r < num_kernel_ranges; ++r) {
 317                 rp = &kernel_ranges[r];
 318                 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
 319                     va += cnt * LEVEL_SIZE(rp->hkr_level)) {
 320 
 321                         if (rp->hkr_level == TOP_LEVEL(hat))
 322                                 ht = hat->hat_htable;
 323                         else
 324                                 ht = htable_create(hat, va, rp->hkr_level,
 325                                     NULL);
 326 
 327                         start = htable_va2entry(va, ht);
 328                         cnt = HTABLE_NUM_PTES(ht) - start;
 329                         eva = va +
 330                             ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
 331                         if (rp->hkr_end_va != 0 &&
 332                             (eva > rp->hkr_end_va || eva == 0))
 333                                 cnt = htable_va2entry(rp->hkr_end_va, ht) -
 334                                     start;
 335 
 336 #if defined(__i386) && !defined(__xpv)
 337                         if (ht->ht_flags & HTABLE_VLP) {
 338                                 bcopy(&vlp_page[start],
 339                                     &hat->hat_vlp_ptes[start],
 340                                     cnt * sizeof (x86pte_t));
 341                                 continue;
 342                         }
 343 #endif
 344                         src = htable_lookup(kas.a_hat, va, rp->hkr_level);
 345                         ASSERT(src != NULL);
 346                         x86pte_copy(src, ht, start, cnt);
 347                         htable_release(src);
 348                 }
 349         }
 350 
 351 init_done:
 352 
 353 #if defined(__xpv)
 354         /*
 355          * Pin top level page tables after initializing them
 356          */
 357         xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
 358 #if defined(__amd64)
 359         xen_pin(hat->hat_user_ptable, mmu.max_level);
 360 #endif
 361 #endif
 362         XPV_ALLOW_MIGRATE();
 363 
 364         /*
 365          * Put it at the start of the global list of all hats (used by stealing)
 366          *
 367          * kas.a_hat is not in the list but is instead used to find the
 368          * first and last items in the list.
 369          *
 370          * - kas.a_hat->hat_next points to the start of the user hats.
 371          *   The list ends where hat->hat_next == NULL
 372          *
 373          * - kas.a_hat->hat_prev points to the last of the user hats.
 374          *   The list begins where hat->hat_prev == NULL
 375          */
 376         mutex_enter(&hat_list_lock);
 377         hat->hat_prev = NULL;
 378         hat->hat_next = kas.a_hat->hat_next;
 379         if (hat->hat_next)
 380                 hat->hat_next->hat_prev = hat;
 381         else
 382                 kas.a_hat->hat_prev = hat;
 383         kas.a_hat->hat_next = hat;
 384         mutex_exit(&hat_list_lock);
 385 
 386         return (hat);
 387 }
 388 
 389 /*
 390  * process has finished executing but as has not been cleaned up yet.
 391  */
 392 /*ARGSUSED*/
 393 void
 394 hat_free_start(hat_t *hat)
 395 {
 396         ASSERT(AS_WRITE_HELD(hat->hat_as));
 397 
 398         /*
 399          * If the hat is currently a stealing victim, wait for the stealing
 400          * to finish.  Once we mark it as HAT_FREEING, htable_steal()
 401          * won't look at its pagetables anymore.
 402          */
 403         mutex_enter(&hat_list_lock);
 404         while (hat->hat_flags & HAT_VICTIM)
 405                 cv_wait(&hat_list_cv, &hat_list_lock);
 406         hat->hat_flags |= HAT_FREEING;
 407         mutex_exit(&hat_list_lock);
 408 }
 409 
 410 /*
 411  * An address space is being destroyed, so we destroy the associated hat.
 412  */
 413 void
 414 hat_free_end(hat_t *hat)
 415 {
 416         kmem_cache_t *cache;
 417 
 418         ASSERT(hat->hat_flags & HAT_FREEING);
 419 
 420         /*
 421          * must not be running on the given hat
 422          */
 423         ASSERT(CPU->cpu_current_hat != hat);
 424 
 425         /*
 426          * Remove it from the list of HATs
 427          */
 428         mutex_enter(&hat_list_lock);
 429         if (hat->hat_prev)
 430                 hat->hat_prev->hat_next = hat->hat_next;
 431         else
 432                 kas.a_hat->hat_next = hat->hat_next;
 433         if (hat->hat_next)
 434                 hat->hat_next->hat_prev = hat->hat_prev;
 435         else
 436                 kas.a_hat->hat_prev = hat->hat_prev;
 437         mutex_exit(&hat_list_lock);
 438         hat->hat_next = hat->hat_prev = NULL;
 439 
 440 #if defined(__xpv)
 441         /*
 442          * On the hypervisor, unpin top level page table(s)
 443          */
 444         xen_unpin(hat->hat_htable->ht_pfn);
 445 #if defined(__amd64)
 446         xen_unpin(hat->hat_user_ptable);
 447 #endif
 448 #endif
 449 
 450         /*
 451          * Make a pass through the htables freeing them all up.
 452          */
 453         htable_purge_hat(hat);
 454 
 455         /*
 456          * Decide which kmem cache the hash table came from, then free it.
 457          */
 458         if (hat->hat_flags & HAT_VLP)
 459                 cache = vlp_hash_cache;
 460         else
 461                 cache = hat_hash_cache;
 462         kmem_cache_free(cache, hat->hat_ht_hash);
 463         hat->hat_ht_hash = NULL;
 464 
 465         hat->hat_flags = 0;
 466         kmem_cache_free(hat_cache, hat);
 467 }
 468 
 469 /*
 470  * round kernelbase down to a supported value to use for _userlimit
 471  *
 472  * userlimit must be aligned down to an entry in the top level htable.
 473  * The one exception is for 32 bit HAT's running PAE.
 474  */
 475 uintptr_t
 476 hat_kernelbase(uintptr_t va)
 477 {
 478 #if defined(__i386)
 479         va &= LEVEL_MASK(1);
 480 #endif
 481         if (IN_VA_HOLE(va))
 482                 panic("_userlimit %p will fall in VA hole\n", (void *)va);
 483         return (va);
 484 }
 485 
 486 /*
 487  *
 488  */
 489 static void
 490 set_max_page_level()
 491 {
 492         level_t lvl;
 493 
 494         if (!kbm_largepage_support) {
 495                 lvl = 0;
 496         } else {
 497                 if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
 498                         lvl = 2;
 499                         if (chk_optimal_1gtlb &&
 500                             cpuid_opteron_erratum(CPU, 6671130)) {
 501                                 lvl = 1;
 502                         }
 503                         if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
 504                             LEVEL_SHIFT(0))) {
 505                                 lvl = 1;
 506                         }
 507                 } else {
 508                         lvl = 1;
 509                 }
 510         }
 511         mmu.max_page_level = lvl;
 512 
 513         if ((lvl == 2) && (enable_1gpg == 0))
 514                 mmu.umax_page_level = 1;
 515         else
 516                 mmu.umax_page_level = lvl;
 517 }
 518 
 519 /*
 520  * Initialize hat data structures based on processor MMU information.
 521  */
 522 void
 523 mmu_init(void)
 524 {
 525         uint_t max_htables;
 526         uint_t pa_bits;
 527         uint_t va_bits;
 528         int i;
 529 
 530         /*
 531          * If CPU enabled the page table global bit, use it for the kernel
 532          * This is bit 7 in CR4 (PGE - Page Global Enable).
 533          */
 534         if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
 535             (getcr4() & CR4_PGE) != 0)
 536                 mmu.pt_global = PT_GLOBAL;
 537 
 538         /*
 539          * Detect NX and PAE usage.
 540          */
 541         mmu.pae_hat = kbm_pae_support;
 542         if (kbm_nx_support)
 543                 mmu.pt_nx = PT_NX;
 544         else
 545                 mmu.pt_nx = 0;
 546 
 547         /*
 548          * Use CPU info to set various MMU parameters
 549          */
 550         cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
 551 
 552         if (va_bits < sizeof (void *) * NBBY) {
 553                 mmu.hole_start = (1ul << (va_bits - 1));
 554                 mmu.hole_end = 0ul - mmu.hole_start - 1;
 555         } else {
 556                 mmu.hole_end = 0;
 557                 mmu.hole_start = mmu.hole_end - 1;
 558         }
 559 #if defined(OPTERON_ERRATUM_121)
 560         /*
 561          * If erratum 121 has already been detected at this time, hole_start
 562          * contains the value to be subtracted from mmu.hole_start.
 563          */
 564         ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
 565         hole_start = mmu.hole_start - hole_start;
 566 #else
 567         hole_start = mmu.hole_start;
 568 #endif
 569         hole_end = mmu.hole_end;
 570 
 571         mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
 572         if (mmu.pae_hat == 0 && pa_bits > 32)
 573                 mmu.highest_pfn = PFN_4G - 1;
 574 
 575         if (mmu.pae_hat) {
 576                 mmu.pte_size = 8;       /* 8 byte PTEs */
 577                 mmu.pte_size_shift = 3;
 578         } else {
 579                 mmu.pte_size = 4;       /* 4 byte PTEs */
 580                 mmu.pte_size_shift = 2;
 581         }
 582 
 583         if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
 584                 panic("Processor does not support PAE");
 585 
 586         if (!is_x86_feature(x86_featureset, X86FSET_CX8))
 587                 panic("Processor does not support cmpxchg8b instruction");
 588 
 589 #if defined(__amd64)
 590 
 591         mmu.num_level = 4;
 592         mmu.max_level = 3;
 593         mmu.ptes_per_table = 512;
 594         mmu.top_level_count = 512;
 595 
 596         mmu.level_shift[0] = 12;
 597         mmu.level_shift[1] = 21;
 598         mmu.level_shift[2] = 30;
 599         mmu.level_shift[3] = 39;
 600 
 601 #elif defined(__i386)
 602 
 603         if (mmu.pae_hat) {
 604                 mmu.num_level = 3;
 605                 mmu.max_level = 2;
 606                 mmu.ptes_per_table = 512;
 607                 mmu.top_level_count = 4;
 608 
 609                 mmu.level_shift[0] = 12;
 610                 mmu.level_shift[1] = 21;
 611                 mmu.level_shift[2] = 30;
 612 
 613         } else {
 614                 mmu.num_level = 2;
 615                 mmu.max_level = 1;
 616                 mmu.ptes_per_table = 1024;
 617                 mmu.top_level_count = 1024;
 618 
 619                 mmu.level_shift[0] = 12;
 620                 mmu.level_shift[1] = 22;
 621         }
 622 
 623 #endif  /* __i386 */
 624 
 625         for (i = 0; i < mmu.num_level; ++i) {
 626                 mmu.level_size[i] = 1UL << mmu.level_shift[i];
 627                 mmu.level_offset[i] = mmu.level_size[i] - 1;
 628                 mmu.level_mask[i] = ~mmu.level_offset[i];
 629         }
 630 
 631         set_max_page_level();
 632 
 633         mmu_page_sizes = mmu.max_page_level + 1;
 634         mmu_exported_page_sizes = mmu.umax_page_level + 1;
 635 
 636         /* restrict legacy applications from using pagesizes 1g and above */
 637         mmu_legacy_page_sizes =
 638             (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
 639 
 640 
 641         for (i = 0; i <= mmu.max_page_level; ++i) {
 642                 mmu.pte_bits[i] = PT_VALID | pt_kern;
 643                 if (i > 0)
 644                         mmu.pte_bits[i] |= PT_PAGESIZE;
 645         }
 646 
 647         /*
 648          * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
 649          */
 650         for (i = 1; i < mmu.num_level; ++i)
 651                 mmu.ptp_bits[i] = PT_PTPBITS;
 652 
 653 #if defined(__i386)
 654         mmu.ptp_bits[2] = PT_VALID;
 655 #endif
 656 
 657         /*
 658          * Compute how many hash table entries to have per process for htables.
 659          * We start with 1 page's worth of entries.
 660          *
 661          * If physical memory is small, reduce the amount need to cover it.
 662          */
 663         max_htables = physmax / mmu.ptes_per_table;
 664         mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
 665         while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
 666                 mmu.hash_cnt >>= 1;
 667         mmu.vlp_hash_cnt = mmu.hash_cnt;
 668 
 669 #if defined(__amd64)
 670         /*
 671          * If running in 64 bits and physical memory is large,
 672          * increase the size of the cache to cover all of memory for
 673          * a 64 bit process.
 674          */
 675 #define HASH_MAX_LENGTH 4
 676         while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
 677                 mmu.hash_cnt <<= 1;
 678 #endif
 679 }
 680 
 681 
 682 /*
 683  * initialize hat data structures
 684  */
 685 void
 686 hat_init()
 687 {
 688 #if defined(__i386)
 689         /*
 690          * _userlimit must be aligned correctly
 691          */
 692         if ((_userlimit & LEVEL_MASK(1)) != _userlimit) {
 693                 prom_printf("hat_init(): _userlimit=%p, not aligned at %p\n",
 694                     (void *)_userlimit, (void *)LEVEL_SIZE(1));
 695                 halt("hat_init(): Unable to continue");
 696         }
 697 #endif
 698 
 699         cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
 700 
 701         /*
 702          * initialize kmem caches
 703          */
 704         htable_init();
 705         hment_init();
 706 
 707         hat_cache = kmem_cache_create("hat_t",
 708             sizeof (hat_t), 0, hati_constructor, NULL, NULL,
 709             NULL, 0, 0);
 710 
 711         hat_hash_cache = kmem_cache_create("HatHash",
 712             mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
 713             NULL, 0, 0);
 714 
 715         /*
 716          * VLP hats can use a smaller hash table size on large memroy machines
 717          */
 718         if (mmu.hash_cnt == mmu.vlp_hash_cnt) {
 719                 vlp_hash_cache = hat_hash_cache;
 720         } else {
 721                 vlp_hash_cache = kmem_cache_create("HatVlpHash",
 722                     mmu.vlp_hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
 723                     NULL, 0, 0);
 724         }
 725 
 726         /*
 727          * Set up the kernel's hat
 728          */
 729         AS_LOCK_ENTER(&kas, RW_WRITER);
 730         kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
 731         mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
 732         kas.a_hat->hat_as = &kas;
 733         kas.a_hat->hat_flags = 0;
 734         AS_LOCK_EXIT(&kas);
 735 
 736         CPUSET_ZERO(khat_cpuset);
 737         CPUSET_ADD(khat_cpuset, CPU->cpu_id);
 738 
 739         /*
 740          * The kernel hat's next pointer serves as the head of the hat list .
 741          * The kernel hat's prev pointer tracks the last hat on the list for
 742          * htable_steal() to use.
 743          */
 744         kas.a_hat->hat_next = NULL;
 745         kas.a_hat->hat_prev = NULL;
 746 
 747         /*
 748          * Allocate an htable hash bucket for the kernel
 749          * XX64 - tune for 64 bit procs
 750          */
 751         kas.a_hat->hat_num_hash = mmu.hash_cnt;
 752         kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
 753         bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
 754 
 755         /*
 756          * zero out the top level and cached htable pointers
 757          */
 758         kas.a_hat->hat_ht_cached = NULL;
 759         kas.a_hat->hat_htable = NULL;
 760 
 761         /*
 762          * Pre-allocate hrm_hashtab before enabling the collection of
 763          * refmod statistics.  Allocating on the fly would mean us
 764          * running the risk of suffering recursive mutex enters or
 765          * deadlocks.
 766          */
 767         hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
 768             KM_SLEEP);
 769 }
 770 
 771 /*
 772  * Prepare CPU specific pagetables for VLP processes on 64 bit kernels.
 773  *
 774  * Each CPU has a set of 2 pagetables that are reused for any 32 bit
 775  * process it runs. They are the top level pagetable, hci_vlp_l3ptes, and
 776  * the next to top level table for the bottom 512 Gig, hci_vlp_l2ptes.
 777  */
 778 /*ARGSUSED*/
 779 static void
 780 hat_vlp_setup(struct cpu *cpu)
 781 {
 782 #if defined(__amd64) && !defined(__xpv)
 783         struct hat_cpu_info *hci = cpu->cpu_hat_info;
 784         pfn_t pfn;
 785 
 786         /*
 787          * allocate the level==2 page table for the bottom most
 788          * 512Gig of address space (this is where 32 bit apps live)
 789          */
 790         ASSERT(hci != NULL);
 791         hci->hci_vlp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
 792 
 793         /*
 794          * Allocate a top level pagetable and copy the kernel's
 795          * entries into it. Then link in hci_vlp_l2ptes in the 1st entry.
 796          */
 797         hci->hci_vlp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
 798         hci->hci_vlp_pfn =
 799             hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l3ptes);
 800         ASSERT(hci->hci_vlp_pfn != PFN_INVALID);
 801         bcopy(vlp_page, hci->hci_vlp_l3ptes, MMU_PAGESIZE);
 802 
 803         pfn = hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_vlp_l2ptes);
 804         ASSERT(pfn != PFN_INVALID);
 805         hci->hci_vlp_l3ptes[0] = MAKEPTP(pfn, 2);
 806 #endif /* __amd64 && !__xpv */
 807 }
 808 
 809 /*ARGSUSED*/
 810 static void
 811 hat_vlp_teardown(cpu_t *cpu)
 812 {
 813 #if defined(__amd64) && !defined(__xpv)
 814         struct hat_cpu_info *hci;
 815 
 816         if ((hci = cpu->cpu_hat_info) == NULL)
 817                 return;
 818         if (hci->hci_vlp_l2ptes)
 819                 kmem_free(hci->hci_vlp_l2ptes, MMU_PAGESIZE);
 820         if (hci->hci_vlp_l3ptes)
 821                 kmem_free(hci->hci_vlp_l3ptes, MMU_PAGESIZE);
 822 #endif
 823 }
 824 
 825 #define NEXT_HKR(r, l, s, e) {                  \
 826         kernel_ranges[r].hkr_level = l;         \
 827         kernel_ranges[r].hkr_start_va = s;      \
 828         kernel_ranges[r].hkr_end_va = e;        \
 829         ++r;                                    \
 830 }
 831 
 832 /*
 833  * Finish filling in the kernel hat.
 834  * Pre fill in all top level kernel page table entries for the kernel's
 835  * part of the address range.  From this point on we can't use any new
 836  * kernel large pages if they need PTE's at max_level
 837  *
 838  * create the kmap mappings.
 839  */
 840 void
 841 hat_init_finish(void)
 842 {
 843         size_t          size;
 844         uint_t          r = 0;
 845         uintptr_t       va;
 846         hat_kernel_range_t *rp;
 847 
 848 
 849         /*
 850          * We are now effectively running on the kernel hat.
 851          * Clearing use_boot_reserve shuts off using the pre-allocated boot
 852          * reserve for all HAT allocations.  From here on, the reserves are
 853          * only used when avoiding recursion in kmem_alloc().
 854          */
 855         use_boot_reserve = 0;
 856         htable_adjust_reserve();
 857 
 858         /*
 859          * User HATs are initialized with copies of all kernel mappings in
 860          * higher level page tables. Ensure that those entries exist.
 861          */
 862 #if defined(__amd64)
 863 
 864         NEXT_HKR(r, 3, kernelbase, 0);
 865 #if defined(__xpv)
 866         NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
 867 #endif
 868 
 869 #elif defined(__i386)
 870 
 871 #if !defined(__xpv)
 872         if (mmu.pae_hat) {
 873                 va = kernelbase;
 874                 if ((va & LEVEL_MASK(2)) != va) {
 875                         va = P2ROUNDUP(va, LEVEL_SIZE(2));
 876                         NEXT_HKR(r, 1, kernelbase, va);
 877                 }
 878                 if (va != 0)
 879                         NEXT_HKR(r, 2, va, 0);
 880         } else
 881 #endif /* __xpv */
 882                 NEXT_HKR(r, 1, kernelbase, 0);
 883 
 884 #endif /* __i386 */
 885 
 886         num_kernel_ranges = r;
 887 
 888         /*
 889          * Create all the kernel pagetables that will have entries
 890          * shared to user HATs.
 891          */
 892         for (r = 0; r < num_kernel_ranges; ++r) {
 893                 rp = &kernel_ranges[r];
 894                 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
 895                     va += LEVEL_SIZE(rp->hkr_level)) {
 896                         htable_t *ht;
 897 
 898                         if (IN_HYPERVISOR_VA(va))
 899                                 continue;
 900 
 901                         /* can/must skip if a page mapping already exists */
 902                         if (rp->hkr_level <= mmu.max_page_level &&
 903                             (ht = htable_getpage(kas.a_hat, va, NULL)) !=
 904                             NULL) {
 905                                 htable_release(ht);
 906                                 continue;
 907                         }
 908 
 909                         (void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
 910                             NULL);
 911                 }
 912         }
 913 
 914         /*
 915          * 32 bit PAE metal kernels use only 4 of the 512 entries in the
 916          * page holding the top level pagetable. We use the remainder for
 917          * the "per CPU" page tables for VLP processes.
 918          * Map the top level kernel pagetable into the kernel to make
 919          * it easy to use bcopy access these tables.
 920          */
 921         if (mmu.pae_hat) {
 922                 vlp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
 923                 hat_devload(kas.a_hat, (caddr_t)vlp_page, MMU_PAGESIZE,
 924                     kas.a_hat->hat_htable->ht_pfn,
 925 #if !defined(__xpv)
 926                     PROT_WRITE |
 927 #endif
 928                     PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
 929                     HAT_LOAD | HAT_LOAD_NOCONSIST);
 930         }
 931         hat_vlp_setup(CPU);
 932 
 933         /*
 934          * Create kmap (cached mappings of kernel PTEs)
 935          * for 32 bit we map from segmap_start .. ekernelheap
 936          * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
 937          */
 938 #if defined(__i386)
 939         size = (uintptr_t)ekernelheap - segmap_start;
 940 #elif defined(__amd64)
 941         size = segmapsize;
 942 #endif
 943         hat_kmap_init((uintptr_t)segmap_start, size);
 944 }
 945 
 946 /*
 947  * On 32 bit PAE mode, PTE's are 64 bits, but ordinary atomic memory references
 948  * are 32 bit, so for safety we must use atomic_cas_64() to install these.
 949  */
 950 #ifdef __i386
 951 static void
 952 reload_pae32(hat_t *hat, cpu_t *cpu)
 953 {
 954         x86pte_t *src;
 955         x86pte_t *dest;
 956         x86pte_t pte;
 957         int i;
 958 
 959         /*
 960          * Load the 4 entries of the level 2 page table into this
 961          * cpu's range of the vlp_page and point cr3 at them.
 962          */
 963         ASSERT(mmu.pae_hat);
 964         src = hat->hat_vlp_ptes;
 965         dest = vlp_page + (cpu->cpu_id + 1) * VLP_NUM_PTES;
 966         for (i = 0; i < VLP_NUM_PTES; ++i) {
 967                 for (;;) {
 968                         pte = dest[i];
 969                         if (pte == src[i])
 970                                 break;
 971                         if (atomic_cas_64(dest + i, pte, src[i]) != src[i])
 972                                 break;
 973                 }
 974         }
 975 }
 976 #endif
 977 
 978 /*
 979  * Switch to a new active hat, maintaining bit masks to track active CPUs.
 980  *
 981  * On the 32-bit PAE hypervisor, %cr3 is a 64-bit value, on metal it
 982  * remains a 32-bit value.
 983  */
 984 void
 985 hat_switch(hat_t *hat)
 986 {
 987         uint64_t        newcr3;
 988         cpu_t           *cpu = CPU;
 989         hat_t           *old = cpu->cpu_current_hat;
 990 
 991         /*
 992          * set up this information first, so we don't miss any cross calls
 993          */
 994         if (old != NULL) {
 995                 if (old == hat)
 996                         return;
 997                 if (old != kas.a_hat)
 998                         CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
 999         }
1000 
1001         /*
1002          * Add this CPU to the active set for this HAT.
1003          */
1004         if (hat != kas.a_hat) {
1005                 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
1006         }
1007         cpu->cpu_current_hat = hat;
1008 
1009         /*
1010          * now go ahead and load cr3
1011          */
1012         if (hat->hat_flags & HAT_VLP) {
1013 #if defined(__amd64)
1014                 x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1015 
1016                 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1017                 newcr3 = MAKECR3(cpu->cpu_hat_info->hci_vlp_pfn);
1018 #elif defined(__i386)
1019                 reload_pae32(hat, cpu);
1020                 newcr3 = MAKECR3(kas.a_hat->hat_htable->ht_pfn) +
1021                     (cpu->cpu_id + 1) * VLP_SIZE;
1022 #endif
1023         } else {
1024                 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
1025         }
1026 #ifdef __xpv
1027         {
1028                 struct mmuext_op t[2];
1029                 uint_t retcnt;
1030                 uint_t opcnt = 1;
1031 
1032                 t[0].cmd = MMUEXT_NEW_BASEPTR;
1033                 t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1034 #if defined(__amd64)
1035                 /*
1036                  * There's an interesting problem here, as to what to
1037                  * actually specify when switching to the kernel hat.
1038                  * For now we'll reuse the kernel hat again.
1039                  */
1040                 t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
1041                 if (hat == kas.a_hat)
1042                         t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1043                 else
1044                         t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
1045                 ++opcnt;
1046 #endif  /* __amd64 */
1047                 if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
1048                         panic("HYPERVISOR_mmu_update() failed");
1049                 ASSERT(retcnt == opcnt);
1050 
1051         }
1052 #else
1053         setcr3(newcr3);
1054 #endif
1055         ASSERT(cpu == CPU);
1056 }
1057 
1058 /*
1059  * Utility to return a valid x86pte_t from protections, pfn, and level number
1060  */
1061 static x86pte_t
1062 hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
1063 {
1064         x86pte_t        pte;
1065         uint_t          cache_attr = attr & HAT_ORDER_MASK;
1066 
1067         pte = MAKEPTE(pfn, level);
1068 
1069         if (attr & PROT_WRITE)
1070                 PTE_SET(pte, PT_WRITABLE);
1071 
1072         if (attr & PROT_USER)
1073                 PTE_SET(pte, PT_USER);
1074 
1075         if (!(attr & PROT_EXEC))
1076                 PTE_SET(pte, mmu.pt_nx);
1077 
1078         /*
1079          * Set the software bits used track ref/mod sync's and hments.
1080          * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
1081          */
1082         if (flags & HAT_LOAD_NOCONSIST)
1083                 PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
1084         else if (attr & HAT_NOSYNC)
1085                 PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
1086 
1087         /*
1088          * Set the caching attributes in the PTE. The combination
1089          * of attributes are poorly defined, so we pay attention
1090          * to them in the given order.
1091          *
1092          * The test for HAT_STRICTORDER is different because it's defined
1093          * as "0" - which was a stupid thing to do, but is too late to change!
1094          */
1095         if (cache_attr == HAT_STRICTORDER) {
1096                 PTE_SET(pte, PT_NOCACHE);
1097         /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
1098         } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
1099                 /* nothing to set */;
1100         } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
1101                 PTE_SET(pte, PT_NOCACHE);
1102                 if (is_x86_feature(x86_featureset, X86FSET_PAT))
1103                         PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
1104                 else
1105                         PTE_SET(pte, PT_WRITETHRU);
1106         } else {
1107                 panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
1108         }
1109 
1110         return (pte);
1111 }
1112 
1113 /*
1114  * Duplicate address translations of the parent to the child.
1115  * This function really isn't used anymore.
1116  */
1117 /*ARGSUSED*/
1118 int
1119 hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1120 {
1121         ASSERT((uintptr_t)addr < kernelbase);
1122         ASSERT(new != kas.a_hat);
1123         ASSERT(old != kas.a_hat);
1124         return (0);
1125 }
1126 
1127 /*
1128  * Allocate any hat resources required for a process being swapped in.
1129  */
1130 /*ARGSUSED*/
1131 void
1132 hat_swapin(hat_t *hat)
1133 {
1134         /* do nothing - we let everything fault back in */
1135 }
1136 
1137 /*
1138  * Unload all translations associated with an address space of a process
1139  * that is being swapped out.
1140  */
1141 void
1142 hat_swapout(hat_t *hat)
1143 {
1144         uintptr_t       vaddr = (uintptr_t)0;
1145         uintptr_t       eaddr = _userlimit;
1146         htable_t        *ht = NULL;
1147         level_t         l;
1148 
1149         XPV_DISALLOW_MIGRATE();
1150         /*
1151          * We can't just call hat_unload(hat, 0, _userlimit...)  here, because
1152          * seg_spt and shared pagetables can't be swapped out.
1153          * Take a look at segspt_shmswapout() - it's a big no-op.
1154          *
1155          * Instead we'll walk through all the address space and unload
1156          * any mappings which we are sure are not shared, not locked.
1157          */
1158         ASSERT(IS_PAGEALIGNED(vaddr));
1159         ASSERT(IS_PAGEALIGNED(eaddr));
1160         ASSERT(AS_LOCK_HELD(hat->hat_as));
1161         if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1162                 eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1163 
1164         while (vaddr < eaddr) {
1165                 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1166                 if (ht == NULL)
1167                         break;
1168 
1169                 ASSERT(!IN_VA_HOLE(vaddr));
1170 
1171                 /*
1172                  * If the page table is shared skip its entire range.
1173                  */
1174                 l = ht->ht_level;
1175                 if (ht->ht_flags & HTABLE_SHARED_PFN) {
1176                         vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
1177                         htable_release(ht);
1178                         ht = NULL;
1179                         continue;
1180                 }
1181 
1182                 /*
1183                  * If the page table has no locked entries, unload this one.
1184                  */
1185                 if (ht->ht_lock_cnt == 0)
1186                         hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1187                             HAT_UNLOAD_UNMAP);
1188 
1189                 /*
1190                  * If we have a level 0 page table with locked entries,
1191                  * skip the entire page table, otherwise skip just one entry.
1192                  */
1193                 if (ht->ht_lock_cnt > 0 && l == 0)
1194                         vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1195                 else
1196                         vaddr += LEVEL_SIZE(l);
1197         }
1198         if (ht)
1199                 htable_release(ht);
1200 
1201         /*
1202          * We're in swapout because the system is low on memory, so
1203          * go back and flush all the htables off the cached list.
1204          */
1205         htable_purge_hat(hat);
1206         XPV_ALLOW_MIGRATE();
1207 }
1208 
1209 /*
1210  * returns number of bytes that have valid mappings in hat.
1211  */
1212 size_t
1213 hat_get_mapped_size(hat_t *hat)
1214 {
1215         size_t total = 0;
1216         int l;
1217 
1218         for (l = 0; l <= mmu.max_page_level; l++)
1219                 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1220         total += hat->hat_ism_pgcnt;
1221 
1222         return (total);
1223 }
1224 
1225 /*
1226  * enable/disable collection of stats for hat.
1227  */
1228 int
1229 hat_stats_enable(hat_t *hat)
1230 {
1231         atomic_inc_32(&hat->hat_stats);
1232         return (1);
1233 }
1234 
1235 void
1236 hat_stats_disable(hat_t *hat)
1237 {
1238         atomic_dec_32(&hat->hat_stats);
1239 }
1240 
1241 /*
1242  * Utility to sync the ref/mod bits from a page table entry to the page_t
1243  * We must be holding the mapping list lock when this is called.
1244  */
1245 static void
1246 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1247 {
1248         uint_t  rm = 0;
1249         pgcnt_t pgcnt;
1250 
1251         if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1252                 return;
1253 
1254         if (PTE_GET(pte, PT_REF))
1255                 rm |= P_REF;
1256 
1257         if (PTE_GET(pte, PT_MOD))
1258                 rm |= P_MOD;
1259 
1260         if (rm == 0)
1261                 return;
1262 
1263         /*
1264          * sync to all constituent pages of a large page
1265          */
1266         ASSERT(x86_hm_held(pp));
1267         pgcnt = page_get_pagecnt(level);
1268         ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1269         for (; pgcnt > 0; --pgcnt) {
1270                 /*
1271                  * hat_page_demote() can't decrease
1272                  * pszc below this mapping size
1273                  * since this large mapping existed after we
1274                  * took mlist lock.
1275                  */
1276                 ASSERT(pp->p_szc >= level);
1277                 hat_page_setattr(pp, rm);
1278                 ++pp;
1279         }
1280 }
1281 
1282 /*
1283  * This the set of PTE bits for PFN, permissions and caching
1284  * that are allowed to change on a HAT_LOAD_REMAP
1285  */
1286 #define PT_REMAP_BITS                                                   \
1287         (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU |                \
1288         PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
1289 
1290 #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
1291 /*
1292  * Do the low-level work to get a mapping entered into a HAT's pagetables
1293  * and in the mapping list of the associated page_t.
1294  */
1295 static int
1296 hati_pte_map(
1297         htable_t        *ht,
1298         uint_t          entry,
1299         page_t          *pp,
1300         x86pte_t        pte,
1301         int             flags,
1302         void            *pte_ptr)
1303 {
1304         hat_t           *hat = ht->ht_hat;
1305         x86pte_t        old_pte;
1306         level_t         l = ht->ht_level;
1307         hment_t         *hm;
1308         uint_t          is_consist;
1309         uint_t          is_locked;
1310         int             rv = 0;
1311 
1312         /*
1313          * Is this a consistent (ie. need mapping list lock) mapping?
1314          */
1315         is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1316 
1317         /*
1318          * Track locked mapping count in the htable.  Do this first,
1319          * as we track locking even if there already is a mapping present.
1320          */
1321         is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
1322         if (is_locked)
1323                 HTABLE_LOCK_INC(ht);
1324 
1325         /*
1326          * Acquire the page's mapping list lock and get an hment to use.
1327          * Note that hment_prepare() might return NULL.
1328          */
1329         if (is_consist) {
1330                 x86_hm_enter(pp);
1331                 hm = hment_prepare(ht, entry, pp);
1332         }
1333 
1334         /*
1335          * Set the new pte, retrieving the old one at the same time.
1336          */
1337         old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1338 
1339         /*
1340          * Did we get a large page / page table collision?
1341          */
1342         if (old_pte == LPAGE_ERROR) {
1343                 if (is_locked)
1344                         HTABLE_LOCK_DEC(ht);
1345                 rv = -1;
1346                 goto done;
1347         }
1348 
1349         /*
1350          * If the mapping didn't change there is nothing more to do.
1351          */
1352         if (PTE_EQUIV(pte, old_pte))
1353                 goto done;
1354 
1355         /*
1356          * Install a new mapping in the page's mapping list
1357          */
1358         if (!PTE_ISVALID(old_pte)) {
1359                 if (is_consist) {
1360                         hment_assign(ht, entry, pp, hm);
1361                         x86_hm_exit(pp);
1362                 } else {
1363                         ASSERT(flags & HAT_LOAD_NOCONSIST);
1364                 }
1365 #if defined(__amd64)
1366                 if (ht->ht_flags & HTABLE_VLP) {
1367                         cpu_t *cpu = CPU;
1368                         x86pte_t *vlpptep = cpu->cpu_hat_info->hci_vlp_l2ptes;
1369                         VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1370                 }
1371 #endif
1372                 HTABLE_INC(ht->ht_valid_cnt);
1373                 PGCNT_INC(hat, l);
1374                 return (rv);
1375         }
1376 
1377         /*
1378          * Remap's are more complicated:
1379          *  - HAT_LOAD_REMAP must be specified if changing the pfn.
1380          *    We also require that NOCONSIST be specified.
1381          *  - Otherwise only permission or caching bits may change.
1382          */
1383         if (!PTE_ISPAGE(old_pte, l))
1384                 panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1385 
1386         if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1387                 REMAPASSERT(flags & HAT_LOAD_REMAP);
1388                 REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1389                 REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1390                 REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1391                     pf_is_memory(PTE2PFN(pte, l)));
1392                 REMAPASSERT(!is_consist);
1393         }
1394 
1395         /*
1396          * We only let remaps change the certain bits in the PTE.
1397          */
1398         if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1399                 panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1400                     old_pte, pte);
1401 
1402         /*
1403          * We don't create any mapping list entries on a remap, so release
1404          * any allocated hment after we drop the mapping list lock.
1405          */
1406 done:
1407         if (is_consist) {
1408                 x86_hm_exit(pp);
1409                 if (hm != NULL)
1410                         hment_free(hm);
1411         }
1412         return (rv);
1413 }
1414 
1415 /*
1416  * Internal routine to load a single page table entry. This only fails if
1417  * we attempt to overwrite a page table link with a large page.
1418  */
1419 static int
1420 hati_load_common(
1421         hat_t           *hat,
1422         uintptr_t       va,
1423         page_t          *pp,
1424         uint_t          attr,
1425         uint_t          flags,
1426         level_t         level,
1427         pfn_t           pfn)
1428 {
1429         htable_t        *ht;
1430         uint_t          entry;
1431         x86pte_t        pte;
1432         int             rv = 0;
1433 
1434         /*
1435          * The number 16 is arbitrary and here to catch a recursion problem
1436          * early before we blow out the kernel stack.
1437          */
1438         ++curthread->t_hatdepth;
1439         ASSERT(curthread->t_hatdepth < 16);
1440 
1441         ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1442 
1443         if (flags & HAT_LOAD_SHARE)
1444                 hat->hat_flags |= HAT_SHARED;
1445 
1446         /*
1447          * Find the page table that maps this page if it already exists.
1448          */
1449         ht = htable_lookup(hat, va, level);
1450 
1451         /*
1452          * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1453          */
1454         if (pp == NULL)
1455                 flags |= HAT_LOAD_NOCONSIST;
1456 
1457         if (ht == NULL) {
1458                 ht = htable_create(hat, va, level, NULL);
1459                 ASSERT(ht != NULL);
1460         }
1461         entry = htable_va2entry(va, ht);
1462 
1463         /*
1464          * a bunch of paranoid error checking
1465          */
1466         ASSERT(ht->ht_busy > 0);
1467         if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht))
1468                 panic("hati_load_common: bad htable %p, va %p",
1469                     (void *)ht, (void *)va);
1470         ASSERT(ht->ht_level == level);
1471 
1472         /*
1473          * construct the new PTE
1474          */
1475         if (hat == kas.a_hat)
1476                 attr &= ~PROT_USER;
1477         pte = hati_mkpte(pfn, attr, level, flags);
1478         if (hat == kas.a_hat && va >= kernelbase)
1479                 PTE_SET(pte, mmu.pt_global);
1480 
1481         /*
1482          * establish the mapping
1483          */
1484         rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
1485 
1486         /*
1487          * release the htable and any reserves
1488          */
1489         htable_release(ht);
1490         --curthread->t_hatdepth;
1491         return (rv);
1492 }
1493 
1494 /*
1495  * special case of hat_memload to deal with some kernel addrs for performance
1496  */
1497 static void
1498 hat_kmap_load(
1499         caddr_t         addr,
1500         page_t          *pp,
1501         uint_t          attr,
1502         uint_t          flags)
1503 {
1504         uintptr_t       va = (uintptr_t)addr;
1505         x86pte_t        pte;
1506         pfn_t           pfn = page_pptonum(pp);
1507         pgcnt_t         pg_off = mmu_btop(va - mmu.kmap_addr);
1508         htable_t        *ht;
1509         uint_t          entry;
1510         void            *pte_ptr;
1511 
1512         /*
1513          * construct the requested PTE
1514          */
1515         attr &= ~PROT_USER;
1516         attr |= HAT_STORECACHING_OK;
1517         pte = hati_mkpte(pfn, attr, 0, flags);
1518         PTE_SET(pte, mmu.pt_global);
1519 
1520         /*
1521          * Figure out the pte_ptr and htable and use common code to finish up
1522          */
1523         if (mmu.pae_hat)
1524                 pte_ptr = mmu.kmap_ptes + pg_off;
1525         else
1526                 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
1527         ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
1528             LEVEL_SHIFT(1)];
1529         entry = htable_va2entry(va, ht);
1530         ++curthread->t_hatdepth;
1531         ASSERT(curthread->t_hatdepth < 16);
1532         (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
1533         --curthread->t_hatdepth;
1534 }
1535 
1536 /*
1537  * hat_memload() - load a translation to the given page struct
1538  *
1539  * Flags for hat_memload/hat_devload/hat_*attr.
1540  *
1541  *      HAT_LOAD        Default flags to load a translation to the page.
1542  *
1543  *      HAT_LOAD_LOCK   Lock down mapping resources; hat_map(), hat_memload(),
1544  *                      and hat_devload().
1545  *
1546  *      HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
1547  *                      sets PT_NOCONSIST
1548  *
1549  *      HAT_LOAD_SHARE  A flag to hat_memload() to indicate h/w page tables
1550  *                      that map some user pages (not kas) is shared by more
1551  *                      than one process (eg. ISM).
1552  *
1553  *      HAT_LOAD_REMAP  Reload a valid pte with a different page frame.
1554  *
1555  *      HAT_NO_KALLOC   Do not kmem_alloc while creating the mapping; at this
1556  *                      point, it's setting up mapping to allocate internal
1557  *                      hat layer data structures.  This flag forces hat layer
1558  *                      to tap its reserves in order to prevent infinite
1559  *                      recursion.
1560  *
1561  * The following is a protection attribute (like PROT_READ, etc.)
1562  *
1563  *      HAT_NOSYNC      set PT_NOSYNC - this mapping's ref/mod bits
1564  *                      are never cleared.
1565  *
1566  * Installing new valid PTE's and creation of the mapping list
1567  * entry are controlled under the same lock. It's derived from the
1568  * page_t being mapped.
1569  */
1570 static uint_t supported_memload_flags =
1571         HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
1572         HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
1573 
1574 void
1575 hat_memload(
1576         hat_t           *hat,
1577         caddr_t         addr,
1578         page_t          *pp,
1579         uint_t          attr,
1580         uint_t          flags)
1581 {
1582         uintptr_t       va = (uintptr_t)addr;
1583         level_t         level = 0;
1584         pfn_t           pfn = page_pptonum(pp);
1585 
1586         XPV_DISALLOW_MIGRATE();
1587         ASSERT(IS_PAGEALIGNED(va));
1588         ASSERT(hat == kas.a_hat || va < _userlimit);
1589         ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1590         ASSERT((flags & supported_memload_flags) == flags);
1591 
1592         ASSERT(!IN_VA_HOLE(va));
1593         ASSERT(!PP_ISFREE(pp));
1594 
1595         /*
1596          * kernel address special case for performance.
1597          */
1598         if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
1599                 ASSERT(hat == kas.a_hat);
1600                 hat_kmap_load(addr, pp, attr, flags);
1601                 XPV_ALLOW_MIGRATE();
1602                 return;
1603         }
1604 
1605         /*
1606          * This is used for memory with normal caching enabled, so
1607          * always set HAT_STORECACHING_OK.
1608          */
1609         attr |= HAT_STORECACHING_OK;
1610         if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1611                 panic("unexpected hati_load_common() failure");
1612         XPV_ALLOW_MIGRATE();
1613 }
1614 
1615 /* ARGSUSED */
1616 void
1617 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1618     uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
1619 {
1620         hat_memload(hat, addr, pp, attr, flags);
1621 }
1622 
1623 /*
1624  * Load the given array of page structs using large pages when possible
1625  */
1626 void
1627 hat_memload_array(
1628         hat_t           *hat,
1629         caddr_t         addr,
1630         size_t          len,
1631         page_t          **pages,
1632         uint_t          attr,
1633         uint_t          flags)
1634 {
1635         uintptr_t       va = (uintptr_t)addr;
1636         uintptr_t       eaddr = va + len;
1637         level_t         level;
1638         size_t          pgsize;
1639         pgcnt_t         pgindx = 0;
1640         pfn_t           pfn;
1641         pgcnt_t         i;
1642 
1643         XPV_DISALLOW_MIGRATE();
1644         ASSERT(IS_PAGEALIGNED(va));
1645         ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1646         ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1647         ASSERT((flags & supported_memload_flags) == flags);
1648 
1649         /*
1650          * memload is used for memory with full caching enabled, so
1651          * set HAT_STORECACHING_OK.
1652          */
1653         attr |= HAT_STORECACHING_OK;
1654 
1655         /*
1656          * handle all pages using largest possible pagesize
1657          */
1658         while (va < eaddr) {
1659                 /*
1660                  * decide what level mapping to use (ie. pagesize)
1661                  */
1662                 pfn = page_pptonum(pages[pgindx]);
1663                 for (level = mmu.max_page_level; ; --level) {
1664                         pgsize = LEVEL_SIZE(level);
1665                         if (level == 0)
1666                                 break;
1667 
1668                         if (!IS_P2ALIGNED(va, pgsize) ||
1669                             (eaddr - va) < pgsize ||
1670                             !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
1671                                 continue;
1672 
1673                         /*
1674                          * To use a large mapping of this size, all the
1675                          * pages we are passed must be sequential subpages
1676                          * of the large page.
1677                          * hat_page_demote() can't change p_szc because
1678                          * all pages are locked.
1679                          */
1680                         if (pages[pgindx]->p_szc >= level) {
1681                                 for (i = 0; i < mmu_btop(pgsize); ++i) {
1682                                         if (pfn + i !=
1683                                             page_pptonum(pages[pgindx + i]))
1684                                                 break;
1685                                         ASSERT(pages[pgindx + i]->p_szc >=
1686                                             level);
1687                                         ASSERT(pages[pgindx] + i ==
1688                                             pages[pgindx + i]);
1689                                 }
1690                                 if (i == mmu_btop(pgsize)) {
1691 #ifdef DEBUG
1692                                         if (level == 2)
1693                                                 map1gcnt++;
1694 #endif
1695                                         break;
1696                                 }
1697                         }
1698                 }
1699 
1700                 /*
1701                  * Load this page mapping. If the load fails, try a smaller
1702                  * pagesize.
1703                  */
1704                 ASSERT(!IN_VA_HOLE(va));
1705                 while (hati_load_common(hat, va, pages[pgindx], attr,
1706                     flags, level, pfn) != 0) {
1707                         if (level == 0)
1708                                 panic("unexpected hati_load_common() failure");
1709                         --level;
1710                         pgsize = LEVEL_SIZE(level);
1711                 }
1712 
1713                 /*
1714                  * move to next page
1715                  */
1716                 va += pgsize;
1717                 pgindx += mmu_btop(pgsize);
1718         }
1719         XPV_ALLOW_MIGRATE();
1720 }
1721 
1722 /* ARGSUSED */
1723 void
1724 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1725     struct page **pps, uint_t attr, uint_t flags,
1726     hat_region_cookie_t rcookie)
1727 {
1728         hat_memload_array(hat, addr, len, pps, attr, flags);
1729 }
1730 
1731 /*
1732  * void hat_devload(hat, addr, len, pf, attr, flags)
1733  *      load/lock the given page frame number
1734  *
1735  * Advisory ordering attributes. Apply only to device mappings.
1736  *
1737  * HAT_STRICTORDER: the CPU must issue the references in order, as the
1738  *      programmer specified.  This is the default.
1739  * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
1740  *      of reordering; store or load with store or load).
1741  * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
1742  *      to consecutive locations (for example, turn two consecutive byte
1743  *      stores into one halfword store), and it may batch individual loads
1744  *      (for example, turn two consecutive byte loads into one halfword load).
1745  *      This also implies re-ordering.
1746  * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
1747  *      until another store occurs.  The default is to fetch new data
1748  *      on every load.  This also implies merging.
1749  * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
1750  *      the device (perhaps with other data) at a later time.  The default is
1751  *      to push the data right away.  This also implies load caching.
1752  *
1753  * Equivalent of hat_memload(), but can be used for device memory where
1754  * there are no page_t's and we support additional flags (write merging, etc).
1755  * Note that we can have large page mappings with this interface.
1756  */
1757 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
1758         HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
1759         HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
1760 
1761 void
1762 hat_devload(
1763         hat_t           *hat,
1764         caddr_t         addr,
1765         size_t          len,
1766         pfn_t           pfn,
1767         uint_t          attr,
1768         int             flags)
1769 {
1770         uintptr_t       va = ALIGN2PAGE(addr);
1771         uintptr_t       eva = va + len;
1772         level_t         level;
1773         size_t          pgsize;
1774         page_t          *pp;
1775         int             f;      /* per PTE copy of flags  - maybe modified */
1776         uint_t          a;      /* per PTE copy of attr */
1777 
1778         XPV_DISALLOW_MIGRATE();
1779         ASSERT(IS_PAGEALIGNED(va));
1780         ASSERT(hat == kas.a_hat || eva <= _userlimit);
1781         ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1782         ASSERT((flags & supported_devload_flags) == flags);
1783 
1784         /*
1785          * handle all pages
1786          */
1787         while (va < eva) {
1788 
1789                 /*
1790                  * decide what level mapping to use (ie. pagesize)
1791                  */
1792                 for (level = mmu.max_page_level; ; --level) {
1793                         pgsize = LEVEL_SIZE(level);
1794                         if (level == 0)
1795                                 break;
1796                         if (IS_P2ALIGNED(va, pgsize) &&
1797                             (eva - va) >= pgsize &&
1798                             IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
1799 #ifdef DEBUG
1800                                 if (level == 2)
1801                                         map1gcnt++;
1802 #endif
1803                                 break;
1804                         }
1805                 }
1806 
1807                 /*
1808                  * If this is just memory then allow caching (this happens
1809                  * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
1810                  * to override that. If we don't have a page_t then make sure
1811                  * NOCONSIST is set.
1812                  */
1813                 a = attr;
1814                 f = flags;
1815                 if (!pf_is_memory(pfn))
1816                         f |= HAT_LOAD_NOCONSIST;
1817                 else if (!(a & HAT_PLAT_NOCACHE))
1818                         a |= HAT_STORECACHING_OK;
1819 
1820                 if (f & HAT_LOAD_NOCONSIST)
1821                         pp = NULL;
1822                 else
1823                         pp = page_numtopp_nolock(pfn);
1824 
1825                 /*
1826                  * Check to make sure we are really trying to map a valid
1827                  * memory page. The caller wishing to intentionally map
1828                  * free memory pages will have passed the HAT_LOAD_NOCONSIST
1829                  * flag, then pp will be NULL.
1830                  */
1831                 if (pp != NULL) {
1832                         if (PP_ISFREE(pp)) {
1833                                 panic("hat_devload: loading "
1834                                     "a mapping to free page %p", (void *)pp);
1835                         }
1836 
1837                         if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
1838                                 panic("hat_devload: loading a mapping "
1839                                     "to an unlocked page %p",
1840                                     (void *)pp);
1841                         }
1842                 }
1843 
1844                 /*
1845                  * load this page mapping
1846                  */
1847                 ASSERT(!IN_VA_HOLE(va));
1848                 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
1849                         if (level == 0)
1850                                 panic("unexpected hati_load_common() failure");
1851                         --level;
1852                         pgsize = LEVEL_SIZE(level);
1853                 }
1854 
1855                 /*
1856                  * move to next page
1857                  */
1858                 va += pgsize;
1859                 pfn += mmu_btop(pgsize);
1860         }
1861         XPV_ALLOW_MIGRATE();
1862 }
1863 
1864 /*
1865  * void hat_unlock(hat, addr, len)
1866  *      unlock the mappings to a given range of addresses
1867  *
1868  * Locks are tracked by ht_lock_cnt in the htable.
1869  */
1870 void
1871 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1872 {
1873         uintptr_t       vaddr = (uintptr_t)addr;
1874         uintptr_t       eaddr = vaddr + len;
1875         htable_t        *ht = NULL;
1876 
1877         /*
1878          * kernel entries are always locked, we don't track lock counts
1879          */
1880         ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1881         ASSERT(IS_PAGEALIGNED(vaddr));
1882         ASSERT(IS_PAGEALIGNED(eaddr));
1883         if (hat == kas.a_hat)
1884                 return;
1885         if (eaddr > _userlimit)
1886                 panic("hat_unlock() address out of range - above _userlimit");
1887 
1888         XPV_DISALLOW_MIGRATE();
1889         ASSERT(AS_LOCK_HELD(hat->hat_as));
1890         while (vaddr < eaddr) {
1891                 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1892                 if (ht == NULL)
1893                         break;
1894 
1895                 ASSERT(!IN_VA_HOLE(vaddr));
1896 
1897                 if (ht->ht_lock_cnt < 1)
1898                         panic("hat_unlock(): lock_cnt < 1, "
1899                             "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
1900                 HTABLE_LOCK_DEC(ht);
1901 
1902                 vaddr += LEVEL_SIZE(ht->ht_level);
1903         }
1904         if (ht)
1905                 htable_release(ht);
1906         XPV_ALLOW_MIGRATE();
1907 }
1908 
1909 /* ARGSUSED */
1910 void
1911 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
1912     hat_region_cookie_t rcookie)
1913 {
1914         panic("No shared region support on x86");
1915 }
1916 
1917 #if !defined(__xpv)
1918 /*
1919  * Cross call service routine to demap a virtual page on
1920  * the current CPU or flush all mappings in TLB.
1921  */
1922 /*ARGSUSED*/
1923 static int
1924 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
1925 {
1926         hat_t   *hat = (hat_t *)a1;
1927         caddr_t addr = (caddr_t)a2;
1928         size_t len = (size_t)a3;
1929 
1930         /*
1931          * If the target hat isn't the kernel and this CPU isn't operating
1932          * in the target hat, we can ignore the cross call.
1933          */
1934         if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1935                 return (0);
1936 
1937         /*
1938          * For a normal address, we flush a range of contiguous mappings
1939          */
1940         if ((uintptr_t)addr != DEMAP_ALL_ADDR) {
1941                 for (size_t i = 0; i < len; i += MMU_PAGESIZE)
1942                         mmu_tlbflush_entry(addr + i);
1943                 return (0);
1944         }
1945 
1946         /*
1947          * Otherwise we reload cr3 to effect a complete TLB flush.
1948          *
1949          * A reload of cr3 on a VLP process also means we must also recopy in
1950          * the pte values from the struct hat
1951          */
1952         if (hat->hat_flags & HAT_VLP) {
1953 #if defined(__amd64)
1954                 x86pte_t *vlpptep = CPU->cpu_hat_info->hci_vlp_l2ptes;
1955 
1956                 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1957 #elif defined(__i386)
1958                 reload_pae32(hat, CPU);
1959 #endif
1960         }
1961         reload_cr3();
1962         return (0);
1963 }
1964 
1965 /*
1966  * Flush all TLB entries, including global (ie. kernel) ones.
1967  */
1968 static void
1969 flush_all_tlb_entries(void)
1970 {
1971         ulong_t cr4 = getcr4();
1972 
1973         if (cr4 & CR4_PGE) {
1974                 setcr4(cr4 & ~(ulong_t)CR4_PGE);
1975                 setcr4(cr4);
1976 
1977                 /*
1978                  * 32 bit PAE also needs to always reload_cr3()
1979                  */
1980                 if (mmu.max_level == 2)
1981                         reload_cr3();
1982         } else {
1983                 reload_cr3();
1984         }
1985 }
1986 
1987 #define TLB_CPU_HALTED  (01ul)
1988 #define TLB_INVAL_ALL   (02ul)
1989 #define CAS_TLB_INFO(cpu, old, new)     \
1990         atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
1991 
1992 /*
1993  * Record that a CPU is going idle
1994  */
1995 void
1996 tlb_going_idle(void)
1997 {
1998         atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info, TLB_CPU_HALTED);
1999 }
2000 
2001 /*
2002  * Service a delayed TLB flush if coming out of being idle.
2003  * It will be called from cpu idle notification with interrupt disabled.
2004  */
2005 void
2006 tlb_service(void)
2007 {
2008         ulong_t tlb_info;
2009         ulong_t found;
2010 
2011         /*
2012          * We only have to do something if coming out of being idle.
2013          */
2014         tlb_info = CPU->cpu_m.mcpu_tlb_info;
2015         if (tlb_info & TLB_CPU_HALTED) {
2016                 ASSERT(CPU->cpu_current_hat == kas.a_hat);
2017 
2018                 /*
2019                  * Atomic clear and fetch of old state.
2020                  */
2021                 while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
2022                         ASSERT(found & TLB_CPU_HALTED);
2023                         tlb_info = found;
2024                         SMT_PAUSE();
2025                 }
2026                 if (tlb_info & TLB_INVAL_ALL)
2027                         flush_all_tlb_entries();
2028         }
2029 }
2030 #endif /* !__xpv */
2031 
2032 /*
2033  * Internal routine to do cross calls to invalidate a range of pages on
2034  * all CPUs using a given hat.
2035  */
2036 void
2037 hat_tlb_inval_range(hat_t *hat, uintptr_t va, size_t len)
2038 {
2039         extern int      flushes_require_xcalls; /* from mp_startup.c */
2040         cpuset_t        justme;
2041         cpuset_t        cpus_to_shootdown;
2042 #ifndef __xpv
2043         cpuset_t        check_cpus;
2044         cpu_t           *cpup;
2045         int             c;
2046 #endif
2047 
2048         /*
2049          * If the hat is being destroyed, there are no more users, so
2050          * demap need not do anything.
2051          */
2052         if (hat->hat_flags & HAT_FREEING)
2053                 return;
2054 
2055         /*
2056          * If demapping from a shared pagetable, we best demap the
2057          * entire set of user TLBs, since we don't know what addresses
2058          * these were shared at.
2059          */
2060         if (hat->hat_flags & HAT_SHARED) {
2061                 hat = kas.a_hat;
2062                 va = DEMAP_ALL_ADDR;
2063         }
2064 
2065         /*
2066          * if not running with multiple CPUs, don't use cross calls
2067          */
2068         if (panicstr || !flushes_require_xcalls) {
2069 #ifdef __xpv
2070                 if (va == DEMAP_ALL_ADDR) {
2071                         xen_flush_tlb();
2072                 } else {
2073                         for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2074                                 xen_flush_va((caddr_t)(va + i));
2075                 }
2076 #else
2077                 (void) hati_demap_func((xc_arg_t)hat,
2078                     (xc_arg_t)va, (xc_arg_t)len);
2079 #endif
2080                 return;
2081         }
2082 
2083 
2084         /*
2085          * Determine CPUs to shootdown. Kernel changes always do all CPUs.
2086          * Otherwise it's just CPUs currently executing in this hat.
2087          */
2088         kpreempt_disable();
2089         CPUSET_ONLY(justme, CPU->cpu_id);
2090         if (hat == kas.a_hat)
2091                 cpus_to_shootdown = khat_cpuset;
2092         else
2093                 cpus_to_shootdown = hat->hat_cpus;
2094 
2095 #ifndef __xpv
2096         /*
2097          * If any CPUs in the set are idle, just request a delayed flush
2098          * and avoid waking them up.
2099          */
2100         check_cpus = cpus_to_shootdown;
2101         for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
2102                 ulong_t tlb_info;
2103 
2104                 if (!CPU_IN_SET(check_cpus, c))
2105                         continue;
2106                 CPUSET_DEL(check_cpus, c);
2107                 cpup = cpu[c];
2108                 if (cpup == NULL)
2109                         continue;
2110 
2111                 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2112                 while (tlb_info == TLB_CPU_HALTED) {
2113                         (void) CAS_TLB_INFO(cpup, TLB_CPU_HALTED,
2114                             TLB_CPU_HALTED | TLB_INVAL_ALL);
2115                         SMT_PAUSE();
2116                         tlb_info = cpup->cpu_m.mcpu_tlb_info;
2117                 }
2118                 if (tlb_info == (TLB_CPU_HALTED | TLB_INVAL_ALL)) {
2119                         HATSTAT_INC(hs_tlb_inval_delayed);
2120                         CPUSET_DEL(cpus_to_shootdown, c);
2121                 }
2122         }
2123 #endif
2124 
2125         if (CPUSET_ISNULL(cpus_to_shootdown) ||
2126             CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
2127 
2128 #ifdef __xpv
2129                 if (va == DEMAP_ALL_ADDR) {
2130                         xen_flush_tlb();
2131                 } else {
2132                         for (size_t i = 0; i < len; i += MMU_PAGESIZE)
2133                                 xen_flush_va((caddr_t)(va + i));
2134                 }
2135 #else
2136                 (void) hati_demap_func((xc_arg_t)hat,
2137                     (xc_arg_t)va, (xc_arg_t)len);
2138 #endif
2139 
2140         } else {
2141 
2142                 CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
2143 #ifdef __xpv
2144                 if (va == DEMAP_ALL_ADDR) {
2145                         xen_gflush_tlb(cpus_to_shootdown);
2146                 } else {
2147                         for (size_t i = 0; i < len; i += MMU_PAGESIZE) {
2148                                 xen_gflush_va((caddr_t)(va + i),
2149                                     cpus_to_shootdown);
2150                         }
2151                 }
2152 #else
2153                 xc_call((xc_arg_t)hat, (xc_arg_t)va, (xc_arg_t)len,
2154                     CPUSET2BV(cpus_to_shootdown), hati_demap_func);
2155 #endif
2156 
2157         }
2158         kpreempt_enable();
2159 }
2160 
2161 void
2162 hat_tlb_inval(hat_t *hat, uintptr_t va)
2163 {
2164         hat_tlb_inval_range(hat, va, MMU_PAGESIZE);
2165 }
2166 
2167 /*
2168  * Interior routine for HAT_UNLOADs from hat_unload_callback(),
2169  * hat_kmap_unload() OR from hat_steal() code.  This routine doesn't
2170  * handle releasing of the htables.
2171  */
2172 void
2173 hat_pte_unmap(
2174         htable_t        *ht,
2175         uint_t          entry,
2176         uint_t          flags,
2177         x86pte_t        old_pte,
2178         void            *pte_ptr,
2179         boolean_t       tlb)
2180 {
2181         hat_t           *hat = ht->ht_hat;
2182         hment_t         *hm = NULL;
2183         page_t          *pp = NULL;
2184         level_t         l = ht->ht_level;
2185         pfn_t           pfn;
2186 
2187         /*
2188          * We always track the locking counts, even if nothing is unmapped
2189          */
2190         if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2191                 ASSERT(ht->ht_lock_cnt > 0);
2192                 HTABLE_LOCK_DEC(ht);
2193         }
2194 
2195         /*
2196          * Figure out which page's mapping list lock to acquire using the PFN
2197          * passed in "old" PTE. We then attempt to invalidate the PTE.
2198          * If another thread, probably a hat_pageunload, has asynchronously
2199          * unmapped/remapped this address we'll loop here.
2200          */
2201         ASSERT(ht->ht_busy > 0);
2202         while (PTE_ISVALID(old_pte)) {
2203                 pfn = PTE2PFN(old_pte, l);
2204                 if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
2205                         pp = NULL;
2206                 } else {
2207 #ifdef __xpv
2208                         if (pfn == PFN_INVALID)
2209                                 panic("Invalid PFN, but not PT_NOCONSIST");
2210 #endif
2211                         pp = page_numtopp_nolock(pfn);
2212                         if (pp == NULL) {
2213                                 panic("no page_t, not NOCONSIST: old_pte="
2214                                     FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2215                                     old_pte, (uintptr_t)ht, entry,
2216                                     (uintptr_t)pte_ptr);
2217                         }
2218                         x86_hm_enter(pp);
2219                 }
2220 
2221                 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr, tlb);
2222 
2223                 /*
2224                  * If the page hadn't changed we've unmapped it and can proceed
2225                  */
2226                 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2227                         break;
2228 
2229                 /*
2230                  * Otherwise, we'll have to retry with the current old_pte.
2231                  * Drop the hment lock, since the pfn may have changed.
2232                  */
2233                 if (pp != NULL) {
2234                         x86_hm_exit(pp);
2235                         pp = NULL;
2236                 } else {
2237                         ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2238                 }
2239         }
2240 
2241         /*
2242          * If the old mapping wasn't valid, there's nothing more to do
2243          */
2244         if (!PTE_ISVALID(old_pte)) {
2245                 if (pp != NULL)
2246                         x86_hm_exit(pp);
2247                 return;
2248         }
2249 
2250         /*
2251          * Take care of syncing any MOD/REF bits and removing the hment.
2252          */
2253         if (pp != NULL) {
2254                 if (!(flags & HAT_UNLOAD_NOSYNC))
2255                         hati_sync_pte_to_page(pp, old_pte, l);
2256                 hm = hment_remove(pp, ht, entry);
2257                 x86_hm_exit(pp);
2258                 if (hm != NULL)
2259                         hment_free(hm);
2260         }
2261 
2262         /*
2263          * Handle book keeping in the htable and hat
2264          */
2265         ASSERT(ht->ht_valid_cnt > 0);
2266         HTABLE_DEC(ht->ht_valid_cnt);
2267         PGCNT_DEC(hat, l);
2268 }
2269 
2270 /*
2271  * very cheap unload implementation to special case some kernel addresses
2272  */
2273 static void
2274 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2275 {
2276         uintptr_t       va = (uintptr_t)addr;
2277         uintptr_t       eva = va + len;
2278         pgcnt_t         pg_index;
2279         htable_t        *ht;
2280         uint_t          entry;
2281         x86pte_t        *pte_ptr;
2282         x86pte_t        old_pte;
2283 
2284         for (; va < eva; va += MMU_PAGESIZE) {
2285                 /*
2286                  * Get the PTE
2287                  */
2288                 pg_index = mmu_btop(va - mmu.kmap_addr);
2289                 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2290                 old_pte = GET_PTE(pte_ptr);
2291 
2292                 /*
2293                  * get the htable / entry
2294                  */
2295                 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2296                     >> LEVEL_SHIFT(1)];
2297                 entry = htable_va2entry(va, ht);
2298 
2299                 /*
2300                  * use mostly common code to unmap it.
2301                  */
2302                 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr, B_TRUE);
2303         }
2304 }
2305 
2306 
2307 /*
2308  * unload a range of virtual address space (no callback)
2309  */
2310 void
2311 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2312 {
2313         uintptr_t va = (uintptr_t)addr;
2314 
2315         XPV_DISALLOW_MIGRATE();
2316         ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2317 
2318         /*
2319          * special case for performance.
2320          */
2321         if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2322                 ASSERT(hat == kas.a_hat);
2323                 hat_kmap_unload(addr, len, flags);
2324         } else {
2325                 hat_unload_callback(hat, addr, len, flags, NULL);
2326         }
2327         XPV_ALLOW_MIGRATE();
2328 }
2329 
2330 /*
2331  * Do the callbacks for ranges being unloaded.
2332  */
2333 typedef struct range_info {
2334         uintptr_t       rng_va;
2335         ulong_t         rng_cnt;
2336         level_t         rng_level;
2337 } range_info_t;
2338 
2339 /*
2340  * Invalidate the TLB, and perform the callback to the upper level VM system,
2341  * for the specified ranges of contiguous pages.
2342  */
2343 static void
2344 handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range)
2345 {
2346         while (cnt > 0) {
2347                 size_t len;
2348 
2349                 --cnt;
2350                 len = range[cnt].rng_cnt << LEVEL_SHIFT(range[cnt].rng_level);
2351                 hat_tlb_inval_range(hat, (uintptr_t)range[cnt].rng_va, len);
2352 
2353                 if (cb != NULL) {
2354                         cb->hcb_start_addr = (caddr_t)range[cnt].rng_va;
2355                         cb->hcb_end_addr = cb->hcb_start_addr;
2356                         cb->hcb_end_addr += len;
2357                         cb->hcb_function(cb);
2358                 }
2359         }
2360 }
2361 
2362 /*
2363  * Unload a given range of addresses (has optional callback)
2364  *
2365  * Flags:
2366  * define       HAT_UNLOAD              0x00
2367  * define       HAT_UNLOAD_NOSYNC       0x02
2368  * define       HAT_UNLOAD_UNLOCK       0x04
2369  * define       HAT_UNLOAD_OTHER        0x08 - not used
2370  * define       HAT_UNLOAD_UNMAP        0x10 - same as HAT_UNLOAD
2371  */
2372 #define MAX_UNLOAD_CNT (8)
2373 void
2374 hat_unload_callback(
2375         hat_t           *hat,
2376         caddr_t         addr,
2377         size_t          len,
2378         uint_t          flags,
2379         hat_callback_t  *cb)
2380 {
2381         uintptr_t       vaddr = (uintptr_t)addr;
2382         uintptr_t       eaddr = vaddr + len;
2383         htable_t        *ht = NULL;
2384         uint_t          entry;
2385         uintptr_t       contig_va = (uintptr_t)-1L;
2386         range_info_t    r[MAX_UNLOAD_CNT];
2387         uint_t          r_cnt = 0;
2388         x86pte_t        old_pte;
2389 
2390         XPV_DISALLOW_MIGRATE();
2391         ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2392         ASSERT(IS_PAGEALIGNED(vaddr));
2393         ASSERT(IS_PAGEALIGNED(eaddr));
2394 
2395         /*
2396          * Special case a single page being unloaded for speed. This happens
2397          * quite frequently, COW faults after a fork() for example.
2398          */
2399         if (cb == NULL && len == MMU_PAGESIZE) {
2400                 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2401                 if (ht != NULL) {
2402                         if (PTE_ISVALID(old_pte)) {
2403                                 hat_pte_unmap(ht, entry, flags, old_pte,
2404                                     NULL, B_TRUE);
2405                         }
2406                         htable_release(ht);
2407                 }
2408                 XPV_ALLOW_MIGRATE();
2409                 return;
2410         }
2411 
2412         while (vaddr < eaddr) {
2413                 old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2414                 if (ht == NULL)
2415                         break;
2416 
2417                 ASSERT(!IN_VA_HOLE(vaddr));
2418 
2419                 if (vaddr < (uintptr_t)addr)
2420                         panic("hat_unload_callback(): unmap inside large page");
2421 
2422                 /*
2423                  * We'll do the call backs for contiguous ranges
2424                  */
2425                 if (vaddr != contig_va ||
2426                     (r_cnt > 0 && r[r_cnt - 1].rng_level != ht->ht_level)) {
2427                         if (r_cnt == MAX_UNLOAD_CNT) {
2428                                 handle_ranges(hat, cb, r_cnt, r);
2429                                 r_cnt = 0;
2430                         }
2431                         r[r_cnt].rng_va = vaddr;
2432                         r[r_cnt].rng_cnt = 0;
2433                         r[r_cnt].rng_level = ht->ht_level;
2434                         ++r_cnt;
2435                 }
2436 
2437                 /*
2438                  * Unload one mapping (for a single page) from the page tables.
2439                  * Note that we do not remove the mapping from the TLB yet,
2440                  * as indicated by the tlb=FALSE argument to hat_pte_unmap().
2441                  * handle_ranges() will clear the TLB entries with one call to
2442                  * hat_tlb_inval_range() per contiguous range.  This is
2443                  * safe because the page can not be reused until the
2444                  * callback is made (or we return).
2445                  */
2446                 entry = htable_va2entry(vaddr, ht);
2447                 hat_pte_unmap(ht, entry, flags, old_pte, NULL, B_FALSE);
2448                 ASSERT(ht->ht_level <= mmu.max_page_level);
2449                 vaddr += LEVEL_SIZE(ht->ht_level);
2450                 contig_va = vaddr;
2451                 ++r[r_cnt - 1].rng_cnt;
2452         }
2453         if (ht)
2454                 htable_release(ht);
2455 
2456         /*
2457          * handle last range for callbacks
2458          */
2459         if (r_cnt > 0)
2460                 handle_ranges(hat, cb, r_cnt, r);
2461         XPV_ALLOW_MIGRATE();
2462 }
2463 
2464 /*
2465  * Invalidate a virtual address translation on a slave CPU during
2466  * panic() dumps.
2467  */
2468 void
2469 hat_flush_range(hat_t *hat, caddr_t va, size_t size)
2470 {
2471         ssize_t sz;
2472         caddr_t endva = va + size;
2473 
2474         while (va < endva) {
2475                 sz = hat_getpagesize(hat, va);
2476                 if (sz < 0) {
2477 #ifdef __xpv
2478                         xen_flush_tlb();
2479 #else
2480                         flush_all_tlb_entries();
2481 #endif
2482                         break;
2483                 }
2484 #ifdef __xpv
2485                 xen_flush_va(va);
2486 #else
2487                 mmu_tlbflush_entry(va);
2488 #endif
2489                 va += sz;
2490         }
2491 }
2492 
2493 /*
2494  * synchronize mapping with software data structures
2495  *
2496  * This interface is currently only used by the working set monitor
2497  * driver.
2498  */
2499 /*ARGSUSED*/
2500 void
2501 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2502 {
2503         uintptr_t       vaddr = (uintptr_t)addr;
2504         uintptr_t       eaddr = vaddr + len;
2505         htable_t        *ht = NULL;
2506         uint_t          entry;
2507         x86pte_t        pte;
2508         x86pte_t        save_pte;
2509         x86pte_t        new;
2510         page_t          *pp;
2511 
2512         ASSERT(!IN_VA_HOLE(vaddr));
2513         ASSERT(IS_PAGEALIGNED(vaddr));
2514         ASSERT(IS_PAGEALIGNED(eaddr));
2515         ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2516 
2517         XPV_DISALLOW_MIGRATE();
2518         for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2519 try_again:
2520                 pte = htable_walk(hat, &ht, &vaddr, eaddr);
2521                 if (ht == NULL)
2522                         break;
2523                 entry = htable_va2entry(vaddr, ht);
2524 
2525                 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2526                     PTE_GET(pte, PT_REF | PT_MOD) == 0)
2527                         continue;
2528 
2529                 /*
2530                  * We need to acquire the mapping list lock to protect
2531                  * against hat_pageunload(), hat_unload(), etc.
2532                  */
2533                 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
2534                 if (pp == NULL)
2535                         break;
2536                 x86_hm_enter(pp);
2537                 save_pte = pte;
2538                 pte = x86pte_get(ht, entry);
2539                 if (pte != save_pte) {
2540                         x86_hm_exit(pp);
2541                         goto try_again;
2542                 }
2543                 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
2544                     PTE_GET(pte, PT_REF | PT_MOD) == 0) {
2545                         x86_hm_exit(pp);
2546                         continue;
2547                 }
2548 
2549                 /*
2550                  * Need to clear ref or mod bits. We may compete with
2551                  * hardware updating the R/M bits and have to try again.
2552                  */
2553                 if (flags == HAT_SYNC_ZERORM) {
2554                         new = pte;
2555                         PTE_CLR(new, PT_REF | PT_MOD);
2556                         pte = hati_update_pte(ht, entry, pte, new);
2557                         if (pte != 0) {
2558                                 x86_hm_exit(pp);
2559                                 goto try_again;
2560                         }
2561                 } else {
2562                         /*
2563                          * sync the PTE to the page_t
2564                          */
2565                         hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
2566                 }
2567                 x86_hm_exit(pp);
2568         }
2569         if (ht)
2570                 htable_release(ht);
2571         XPV_ALLOW_MIGRATE();
2572 }
2573 
2574 /*
2575  * void hat_map(hat, addr, len, flags)
2576  */
2577 /*ARGSUSED*/
2578 void
2579 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2580 {
2581         /* does nothing */
2582 }
2583 
2584 /*
2585  * uint_t hat_getattr(hat, addr, *attr)
2586  *      returns attr for <hat,addr> in *attr.  returns 0 if there was a
2587  *      mapping and *attr is valid, nonzero if there was no mapping and
2588  *      *attr is not valid.
2589  */
2590 uint_t
2591 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2592 {
2593         uintptr_t       vaddr = ALIGN2PAGE(addr);
2594         htable_t        *ht = NULL;
2595         x86pte_t        pte;
2596 
2597         ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2598 
2599         if (IN_VA_HOLE(vaddr))
2600                 return ((uint_t)-1);
2601 
2602         ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2603         if (ht == NULL)
2604                 return ((uint_t)-1);
2605 
2606         if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
2607                 htable_release(ht);
2608                 return ((uint_t)-1);
2609         }
2610 
2611         *attr = PROT_READ;
2612         if (PTE_GET(pte, PT_WRITABLE))
2613                 *attr |= PROT_WRITE;
2614         if (PTE_GET(pte, PT_USER))
2615                 *attr |= PROT_USER;
2616         if (!PTE_GET(pte, mmu.pt_nx))
2617                 *attr |= PROT_EXEC;
2618         if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
2619                 *attr |= HAT_NOSYNC;
2620         htable_release(ht);
2621         return (0);
2622 }
2623 
2624 /*
2625  * hat_updateattr() applies the given attribute change to an existing mapping
2626  */
2627 #define HAT_LOAD_ATTR           1
2628 #define HAT_SET_ATTR            2
2629 #define HAT_CLR_ATTR            3
2630 
2631 static void
2632 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2633 {
2634         uintptr_t       vaddr = (uintptr_t)addr;
2635         uintptr_t       eaddr = (uintptr_t)addr + len;
2636         htable_t        *ht = NULL;
2637         uint_t          entry;
2638         x86pte_t        oldpte, newpte;
2639         page_t          *pp;
2640 
2641         XPV_DISALLOW_MIGRATE();
2642         ASSERT(IS_PAGEALIGNED(vaddr));
2643         ASSERT(IS_PAGEALIGNED(eaddr));
2644         ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2645         for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
2646 try_again:
2647                 oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2648                 if (ht == NULL)
2649                         break;
2650                 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
2651                         continue;
2652 
2653                 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
2654                 if (pp == NULL)
2655                         continue;
2656                 x86_hm_enter(pp);
2657 
2658                 newpte = oldpte;
2659                 /*
2660                  * We found a page table entry in the desired range,
2661                  * figure out the new attributes.
2662                  */
2663                 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
2664                         if ((attr & PROT_WRITE) &&
2665                             !PTE_GET(oldpte, PT_WRITABLE))
2666                                 newpte |= PT_WRITABLE;
2667 
2668                         if ((attr & HAT_NOSYNC) &&
2669                             PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
2670                                 newpte |= PT_NOSYNC;
2671 
2672                         if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
2673                                 newpte &= ~mmu.pt_nx;
2674                 }
2675 
2676                 if (what == HAT_LOAD_ATTR) {
2677                         if (!(attr & PROT_WRITE) &&
2678                             PTE_GET(oldpte, PT_WRITABLE))
2679                                 newpte &= ~PT_WRITABLE;
2680 
2681                         if (!(attr & HAT_NOSYNC) &&
2682                             PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2683                                 newpte &= ~PT_SOFTWARE;
2684 
2685                         if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2686                                 newpte |= mmu.pt_nx;
2687                 }
2688 
2689                 if (what == HAT_CLR_ATTR) {
2690                         if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
2691                                 newpte &= ~PT_WRITABLE;
2692 
2693                         if ((attr & HAT_NOSYNC) &&
2694                             PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
2695                                 newpte &= ~PT_SOFTWARE;
2696 
2697                         if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
2698                                 newpte |= mmu.pt_nx;
2699                 }
2700 
2701                 /*
2702                  * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
2703                  * x86pte_set() depends on this.
2704                  */
2705                 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
2706                         newpte |= PT_REF | PT_MOD;
2707 
2708                 /*
2709                  * what about PROT_READ or others? this code only handles:
2710                  * EXEC, WRITE, NOSYNC
2711                  */
2712 
2713                 /*
2714                  * If new PTE really changed, update the table.
2715                  */
2716                 if (newpte != oldpte) {
2717                         entry = htable_va2entry(vaddr, ht);
2718                         oldpte = hati_update_pte(ht, entry, oldpte, newpte);
2719                         if (oldpte != 0) {
2720                                 x86_hm_exit(pp);
2721                                 goto try_again;
2722                         }
2723                 }
2724                 x86_hm_exit(pp);
2725         }
2726         if (ht)
2727                 htable_release(ht);
2728         XPV_ALLOW_MIGRATE();
2729 }
2730 
2731 /*
2732  * Various wrappers for hat_updateattr()
2733  */
2734 void
2735 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2736 {
2737         ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2738         hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2739 }
2740 
2741 void
2742 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2743 {
2744         ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2745         hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2746 }
2747 
2748 void
2749 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2750 {
2751         ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2752         hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2753 }
2754 
2755 void
2756 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2757 {
2758         ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2759         hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
2760 }
2761 
2762 /*
2763  * size_t hat_getpagesize(hat, addr)
2764  *      returns pagesize in bytes for <hat, addr>. returns -1 of there is
2765  *      no mapping. This is an advisory call.
2766  */
2767 ssize_t
2768 hat_getpagesize(hat_t *hat, caddr_t addr)
2769 {
2770         uintptr_t       vaddr = ALIGN2PAGE(addr);
2771         htable_t        *ht;
2772         size_t          pagesize;
2773 
2774         ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2775         if (IN_VA_HOLE(vaddr))
2776                 return (-1);
2777         ht = htable_getpage(hat, vaddr, NULL);
2778         if (ht == NULL)
2779                 return (-1);
2780         pagesize = LEVEL_SIZE(ht->ht_level);
2781         htable_release(ht);
2782         return (pagesize);
2783 }
2784 
2785 
2786 
2787 /*
2788  * pfn_t hat_getpfnum(hat, addr)
2789  *      returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2790  */
2791 pfn_t
2792 hat_getpfnum(hat_t *hat, caddr_t addr)
2793 {
2794         uintptr_t       vaddr = ALIGN2PAGE(addr);
2795         htable_t        *ht;
2796         uint_t          entry;
2797         pfn_t           pfn = PFN_INVALID;
2798 
2799         ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2800         if (khat_running == 0)
2801                 return (PFN_INVALID);
2802 
2803         if (IN_VA_HOLE(vaddr))
2804                 return (PFN_INVALID);
2805 
2806         XPV_DISALLOW_MIGRATE();
2807         /*
2808          * A very common use of hat_getpfnum() is from the DDI for kernel pages.
2809          * Use the kmap_ptes (which also covers the 32 bit heap) to speed
2810          * this up.
2811          */
2812         if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2813                 x86pte_t pte;
2814                 pgcnt_t pg_index;
2815 
2816                 pg_index = mmu_btop(vaddr - mmu.kmap_addr);
2817                 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
2818                 if (PTE_ISVALID(pte))
2819                         /*LINTED [use of constant 0 causes a lint warning] */
2820                         pfn = PTE2PFN(pte, 0);
2821                 XPV_ALLOW_MIGRATE();
2822                 return (pfn);
2823         }
2824 
2825         ht = htable_getpage(hat, vaddr, &entry);
2826         if (ht == NULL) {
2827                 XPV_ALLOW_MIGRATE();
2828                 return (PFN_INVALID);
2829         }
2830         ASSERT(vaddr >= ht->ht_vaddr);
2831         ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
2832         pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
2833         if (ht->ht_level > 0)
2834                 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
2835         htable_release(ht);
2836         XPV_ALLOW_MIGRATE();
2837         return (pfn);
2838 }
2839 
2840 /*
2841  * int hat_probe(hat, addr)
2842  *      return 0 if no valid mapping is present.  Faster version
2843  *      of hat_getattr in certain architectures.
2844  */
2845 int
2846 hat_probe(hat_t *hat, caddr_t addr)
2847 {
2848         uintptr_t       vaddr = ALIGN2PAGE(addr);
2849         uint_t          entry;
2850         htable_t        *ht;
2851         pgcnt_t         pg_off;
2852 
2853         ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2854         ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2855         if (IN_VA_HOLE(vaddr))
2856                 return (0);
2857 
2858         /*
2859          * Most common use of hat_probe is from segmap. We special case it
2860          * for performance.
2861          */
2862         if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
2863                 pg_off = mmu_btop(vaddr - mmu.kmap_addr);
2864                 if (mmu.pae_hat)
2865                         return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
2866                 else
2867                         return (PTE_ISVALID(
2868                             ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
2869         }
2870 
2871         ht = htable_getpage(hat, vaddr, &entry);
2872         htable_release(ht);
2873         return (ht != NULL);
2874 }
2875 
2876 /*
2877  * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
2878  */
2879 static int
2880 is_it_dism(hat_t *hat, caddr_t va)
2881 {
2882         struct seg *seg;
2883         struct shm_data *shmd;
2884         struct spt_data *sptd;
2885 
2886         seg = as_findseg(hat->hat_as, va, 0);
2887         ASSERT(seg != NULL);
2888         ASSERT(seg->s_base <= va);
2889         shmd = (struct shm_data *)seg->s_data;
2890         ASSERT(shmd != NULL);
2891         sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
2892         ASSERT(sptd != NULL);
2893         if (sptd->spt_flags & SHM_PAGEABLE)
2894                 return (1);
2895         return (0);
2896 }
2897 
2898 /*
2899  * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
2900  * except that we use the ism_hat's existing mappings to determine the pages
2901  * and protections to use for this hat. If we find a full properly aligned
2902  * and sized pagetable, we will attempt to share the pagetable itself.
2903  */
2904 /*ARGSUSED*/
2905 int
2906 hat_share(
2907         hat_t           *hat,
2908         caddr_t         addr,
2909         hat_t           *ism_hat,
2910         caddr_t         src_addr,
2911         size_t          len,    /* almost useless value, see below.. */
2912         uint_t          ismszc)
2913 {
2914         uintptr_t       vaddr_start = (uintptr_t)addr;
2915         uintptr_t       vaddr;
2916         uintptr_t       eaddr = vaddr_start + len;
2917         uintptr_t       ism_addr_start = (uintptr_t)src_addr;
2918         uintptr_t       ism_addr = ism_addr_start;
2919         uintptr_t       e_ism_addr = ism_addr + len;
2920         htable_t        *ism_ht = NULL;
2921         htable_t        *ht;
2922         x86pte_t        pte;
2923         page_t          *pp;
2924         pfn_t           pfn;
2925         level_t         l;
2926         pgcnt_t         pgcnt;
2927         uint_t          prot;
2928         int             is_dism;
2929         int             flags;
2930 
2931         /*
2932          * We might be asked to share an empty DISM hat by as_dup()
2933          */
2934         ASSERT(hat != kas.a_hat);
2935         ASSERT(eaddr <= _userlimit);
2936         if (!(ism_hat->hat_flags & HAT_SHARED)) {
2937                 ASSERT(hat_get_mapped_size(ism_hat) == 0);
2938                 return (0);
2939         }
2940         XPV_DISALLOW_MIGRATE();
2941 
2942         /*
2943          * The SPT segment driver often passes us a size larger than there are
2944          * valid mappings. That's because it rounds the segment size up to a
2945          * large pagesize, even if the actual memory mapped by ism_hat is less.
2946          */
2947         ASSERT(IS_PAGEALIGNED(vaddr_start));
2948         ASSERT(IS_PAGEALIGNED(ism_addr_start));
2949         ASSERT(ism_hat->hat_flags & HAT_SHARED);
2950         is_dism = is_it_dism(hat, addr);
2951         while (ism_addr < e_ism_addr) {
2952                 /*
2953                  * use htable_walk to get the next valid ISM mapping
2954                  */
2955                 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
2956                 if (ism_ht == NULL)
2957                         break;
2958 
2959                 /*
2960                  * First check to see if we already share the page table.
2961                  */
2962                 l = ism_ht->ht_level;
2963                 vaddr = vaddr_start + (ism_addr - ism_addr_start);
2964                 ht = htable_lookup(hat, vaddr, l);
2965                 if (ht != NULL) {
2966                         if (ht->ht_flags & HTABLE_SHARED_PFN)
2967                                 goto shared;
2968                         htable_release(ht);
2969                         goto not_shared;
2970                 }
2971 
2972                 /*
2973                  * Can't ever share top table.
2974                  */
2975                 if (l == mmu.max_level)
2976                         goto not_shared;
2977 
2978                 /*
2979                  * Avoid level mismatches later due to DISM faults.
2980                  */
2981                 if (is_dism && l > 0)
2982                         goto not_shared;
2983 
2984                 /*
2985                  * addresses and lengths must align
2986                  * table must be fully populated
2987                  * no lower level page tables
2988                  */
2989                 if (ism_addr != ism_ht->ht_vaddr ||
2990                     (vaddr & LEVEL_OFFSET(l + 1)) != 0)
2991                         goto not_shared;
2992 
2993                 /*
2994                  * The range of address space must cover a full table.
2995                  */
2996                 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
2997                         goto not_shared;
2998 
2999                 /*
3000                  * All entries in the ISM page table must be leaf PTEs.
3001                  */
3002                 if (l > 0) {
3003                         int e;
3004 
3005                         /*
3006                          * We know the 0th is from htable_walk() above.
3007                          */
3008                         for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
3009                                 x86pte_t pte;
3010                                 pte = x86pte_get(ism_ht, e);
3011                                 if (!PTE_ISPAGE(pte, l))
3012                                         goto not_shared;
3013                         }
3014                 }
3015 
3016                 /*
3017                  * share the page table
3018                  */
3019                 ht = htable_create(hat, vaddr, l, ism_ht);
3020 shared:
3021                 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
3022                 ASSERT(ht->ht_shares == ism_ht);
3023                 hat->hat_ism_pgcnt +=
3024                     (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
3025                     (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3026                 ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
3027                 htable_release(ht);
3028                 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
3029                 htable_release(ism_ht);
3030                 ism_ht = NULL;
3031                 continue;
3032 
3033 not_shared:
3034                 /*
3035                  * Unable to share the page table. Instead we will
3036                  * create new mappings from the values in the ISM mappings.
3037                  * Figure out what level size mappings to use;
3038                  */
3039                 for (l = ism_ht->ht_level; l > 0; --l) {
3040                         if (LEVEL_SIZE(l) <= eaddr - vaddr &&
3041                             (vaddr & LEVEL_OFFSET(l)) == 0)
3042                                 break;
3043                 }
3044 
3045                 /*
3046                  * The ISM mapping might be larger than the share area,
3047                  * be careful to truncate it if needed.
3048                  */
3049                 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
3050                         pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
3051                 } else {
3052                         pgcnt = mmu_btop(eaddr - vaddr);
3053                         l = 0;
3054                 }
3055 
3056                 pfn = PTE2PFN(pte, ism_ht->ht_level);
3057                 ASSERT(pfn != PFN_INVALID);
3058                 while (pgcnt > 0) {
3059                         /*
3060                          * Make a new pte for the PFN for this level.
3061                          * Copy protections for the pte from the ISM pte.
3062                          */
3063                         pp = page_numtopp_nolock(pfn);
3064                         ASSERT(pp != NULL);
3065 
3066                         prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
3067                         if (PTE_GET(pte, PT_WRITABLE))
3068                                 prot |= PROT_WRITE;
3069                         if (!PTE_GET(pte, PT_NX))
3070                                 prot |= PROT_EXEC;
3071 
3072                         flags = HAT_LOAD;
3073                         if (!is_dism)
3074                                 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
3075                         while (hati_load_common(hat, vaddr, pp, prot, flags,
3076                             l, pfn) != 0) {
3077                                 if (l == 0)
3078                                         panic("hati_load_common() failure");
3079                                 --l;
3080                         }
3081 
3082                         vaddr += LEVEL_SIZE(l);
3083                         ism_addr += LEVEL_SIZE(l);
3084                         pfn += mmu_btop(LEVEL_SIZE(l));
3085                         pgcnt -= mmu_btop(LEVEL_SIZE(l));
3086                 }
3087         }
3088         if (ism_ht != NULL)
3089                 htable_release(ism_ht);
3090         XPV_ALLOW_MIGRATE();
3091         return (0);
3092 }
3093 
3094 
3095 /*
3096  * hat_unshare() is similar to hat_unload_callback(), but
3097  * we have to look for empty shared pagetables. Note that
3098  * hat_unshare() is always invoked against an entire segment.
3099  */
3100 /*ARGSUSED*/
3101 void
3102 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3103 {
3104         uint64_t        vaddr = (uintptr_t)addr;
3105         uintptr_t       eaddr = vaddr + len;
3106         htable_t        *ht = NULL;
3107         uint_t          need_demaps = 0;
3108         int             flags = HAT_UNLOAD_UNMAP;
3109         level_t         l;
3110 
3111         ASSERT(hat != kas.a_hat);
3112         ASSERT(eaddr <= _userlimit);
3113         ASSERT(IS_PAGEALIGNED(vaddr));
3114         ASSERT(IS_PAGEALIGNED(eaddr));
3115         XPV_DISALLOW_MIGRATE();
3116 
3117         /*
3118          * First go through and remove any shared pagetables.
3119          *
3120          * Note that it's ok to delay the TLB shootdown till the entire range is
3121          * finished, because if hat_pageunload() were to unload a shared
3122          * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
3123          */
3124         l = mmu.max_page_level;
3125         if (l == mmu.max_level)
3126                 --l;
3127         for (; l >= 0; --l) {
3128                 for (vaddr = (uintptr_t)addr; vaddr < eaddr;
3129                     vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
3130                         ASSERT(!IN_VA_HOLE(vaddr));
3131                         /*
3132                          * find a pagetable that maps the current address
3133                          */
3134                         ht = htable_lookup(hat, vaddr, l);
3135                         if (ht == NULL)
3136                                 continue;
3137                         if (ht->ht_flags & HTABLE_SHARED_PFN) {
3138                                 /*
3139                                  * clear page count, set valid_cnt to 0,
3140                                  * let htable_release() finish the job
3141                                  */
3142                                 hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3143                                     (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3144                                 ht->ht_valid_cnt = 0;
3145                                 need_demaps = 1;
3146                         }
3147                         htable_release(ht);
3148                 }
3149         }
3150 
3151         /*
3152          * flush the TLBs - since we're probably dealing with MANY mappings
3153          * we do just one CR3 reload.
3154          */
3155         if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3156                 hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3157 
3158         /*
3159          * Now go back and clean up any unaligned mappings that
3160          * couldn't share pagetables.
3161          */
3162         if (!is_it_dism(hat, addr))
3163                 flags |= HAT_UNLOAD_UNLOCK;
3164         hat_unload(hat, addr, len, flags);
3165         XPV_ALLOW_MIGRATE();
3166 }
3167 
3168 
3169 /*
3170  * hat_reserve() does nothing
3171  */
3172 /*ARGSUSED*/
3173 void
3174 hat_reserve(struct as *as, caddr_t addr, size_t len)
3175 {
3176 }
3177 
3178 
3179 /*
3180  * Called when all mappings to a page should have write permission removed.
3181  * Mostly stolen from hat_pagesync()
3182  */
3183 static void
3184 hati_page_clrwrt(struct page *pp)
3185 {
3186         hment_t         *hm = NULL;
3187         htable_t        *ht;
3188         uint_t          entry;
3189         x86pte_t        old;
3190         x86pte_t        new;
3191         uint_t          pszc = 0;
3192 
3193         XPV_DISALLOW_MIGRATE();
3194 next_size:
3195         /*
3196          * walk thru the mapping list clearing write permission
3197          */
3198         x86_hm_enter(pp);
3199         while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3200                 if (ht->ht_level < pszc)
3201                         continue;
3202                 old = x86pte_get(ht, entry);
3203 
3204                 for (;;) {
3205                         /*
3206                          * Is this mapping of interest?
3207                          */
3208                         if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3209                             PTE_GET(old, PT_WRITABLE) == 0)
3210                                 break;
3211 
3212                         /*
3213                          * Clear ref/mod writable bits. This requires cross
3214                          * calls to ensure any executing TLBs see cleared bits.
3215                          */
3216                         new = old;
3217                         PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3218                         old = hati_update_pte(ht, entry, old, new);
3219                         if (old != 0)
3220                                 continue;
3221 
3222                         break;
3223                 }
3224         }
3225         x86_hm_exit(pp);
3226         while (pszc < pp->p_szc) {
3227                 page_t *tpp;
3228                 pszc++;
3229                 tpp = PP_GROUPLEADER(pp, pszc);
3230                 if (pp != tpp) {
3231                         pp = tpp;
3232                         goto next_size;
3233                 }
3234         }
3235         XPV_ALLOW_MIGRATE();
3236 }
3237 
3238 /*
3239  * void hat_page_setattr(pp, flag)
3240  * void hat_page_clrattr(pp, flag)
3241  *      used to set/clr ref/mod bits.
3242  */
3243 void
3244 hat_page_setattr(struct page *pp, uint_t flag)
3245 {
3246         vnode_t         *vp = pp->p_vnode;
3247         kmutex_t        *vphm = NULL;
3248         page_t          **listp;
3249         int             noshuffle;
3250 
3251         noshuffle = flag & P_NSH;
3252         flag &= ~P_NSH;
3253 
3254         if (PP_GETRM(pp, flag) == flag)
3255                 return;
3256 
3257         if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3258             !noshuffle) {
3259                 vphm = page_vnode_mutex(vp);
3260                 mutex_enter(vphm);
3261         }
3262 
3263         PP_SETRM(pp, flag);
3264 
3265         if (vphm != NULL) {
3266 
3267                 /*
3268                  * Some File Systems examine v_pages for NULL w/o
3269                  * grabbing the vphm mutex. Must not let it become NULL when
3270                  * pp is the only page on the list.
3271                  */
3272                 if (pp->p_vpnext != pp) {
3273                         page_vpsub(&vp->v_pages, pp);
3274                         if (vp->v_pages != NULL)
3275                                 listp = &vp->v_pages->p_vpprev->p_vpnext;
3276                         else
3277                                 listp = &vp->v_pages;
3278                         page_vpadd(listp, pp);
3279                 }
3280                 mutex_exit(vphm);
3281         }
3282 }
3283 
3284 void
3285 hat_page_clrattr(struct page *pp, uint_t flag)
3286 {
3287         vnode_t         *vp = pp->p_vnode;
3288         ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3289 
3290         /*
3291          * Caller is expected to hold page's io lock for VMODSORT to work
3292          * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
3293          * bit is cleared.
3294          * We don't have assert to avoid tripping some existing third party
3295          * code. The dirty page is moved back to top of the v_page list
3296          * after IO is done in pvn_write_done().
3297          */
3298         PP_CLRRM(pp, flag);
3299 
3300         if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3301 
3302                 /*
3303                  * VMODSORT works by removing write permissions and getting
3304                  * a fault when a page is made dirty. At this point
3305                  * we need to remove write permission from all mappings
3306                  * to this page.
3307                  */
3308                 hati_page_clrwrt(pp);
3309         }
3310 }
3311 
3312 /*
3313  *      If flag is specified, returns 0 if attribute is disabled
3314  *      and non zero if enabled.  If flag specifes multiple attributes
3315  *      then returns 0 if ALL attributes are disabled.  This is an advisory
3316  *      call.
3317  */
3318 uint_t
3319 hat_page_getattr(struct page *pp, uint_t flag)
3320 {
3321         return (PP_GETRM(pp, flag));
3322 }
3323 
3324 
3325 /*
3326  * common code used by hat_pageunload() and hment_steal()
3327  */
3328 hment_t *
3329 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3330 {
3331         x86pte_t old_pte;
3332         pfn_t pfn = pp->p_pagenum;
3333         hment_t *hm;
3334 
3335         /*
3336          * We need to acquire a hold on the htable in order to
3337          * do the invalidate. We know the htable must exist, since
3338          * unmap's don't release the htable until after removing any
3339          * hment. Having x86_hm_enter() keeps that from proceeding.
3340          */
3341         htable_acquire(ht);
3342 
3343         /*
3344          * Invalidate the PTE and remove the hment.
3345          */
3346         old_pte = x86pte_inval(ht, entry, 0, NULL, B_TRUE);
3347         if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3348                 panic("x86pte_inval() failure found PTE = " FMT_PTE
3349                     " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3350                     old_pte, pfn, (uintptr_t)ht, entry);
3351         }
3352 
3353         /*
3354          * Clean up all the htable information for this mapping
3355          */
3356         ASSERT(ht->ht_valid_cnt > 0);
3357         HTABLE_DEC(ht->ht_valid_cnt);
3358         PGCNT_DEC(ht->ht_hat, ht->ht_level);
3359 
3360         /*
3361          * sync ref/mod bits to the page_t
3362          */
3363         if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3364                 hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3365 
3366         /*
3367          * Remove the mapping list entry for this page.
3368          */
3369         hm = hment_remove(pp, ht, entry);
3370 
3371         /*
3372          * drop the mapping list lock so that we might free the
3373          * hment and htable.
3374          */
3375         x86_hm_exit(pp);
3376         htable_release(ht);
3377         return (hm);
3378 }
3379 
3380 extern int      vpm_enable;
3381 /*
3382  * Unload all translations to a page. If the page is a subpage of a large
3383  * page, the large page mappings are also removed.
3384  *
3385  * The forceflags are unused.
3386  */
3387 
3388 /*ARGSUSED*/
3389 static int
3390 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3391 {
3392         page_t          *cur_pp = pp;
3393         hment_t         *hm;
3394         hment_t         *prev;
3395         htable_t        *ht;
3396         uint_t          entry;
3397         level_t         level;
3398 
3399         XPV_DISALLOW_MIGRATE();
3400 
3401         /*
3402          * prevent recursion due to kmem_free()
3403          */
3404         ++curthread->t_hatdepth;
3405         ASSERT(curthread->t_hatdepth < 16);
3406 
3407 #if defined(__amd64)
3408         /*
3409          * clear the vpm ref.
3410          */
3411         if (vpm_enable) {
3412                 pp->p_vpmref = 0;
3413         }
3414 #endif
3415         /*
3416          * The loop with next_size handles pages with multiple pagesize mappings
3417          */
3418 next_size:
3419         for (;;) {
3420 
3421                 /*
3422                  * Get a mapping list entry
3423                  */
3424                 x86_hm_enter(cur_pp);
3425                 for (prev = NULL; ; prev = hm) {
3426                         hm = hment_walk(cur_pp, &ht, &entry, prev);
3427                         if (hm == NULL) {
3428                                 x86_hm_exit(cur_pp);
3429 
3430                                 /*
3431                                  * If not part of a larger page, we're done.
3432                                  */
3433                                 if (cur_pp->p_szc <= pg_szcd) {
3434                                         ASSERT(curthread->t_hatdepth > 0);
3435                                         --curthread->t_hatdepth;
3436                                         XPV_ALLOW_MIGRATE();
3437                                         return (0);
3438                                 }
3439 
3440                                 /*
3441                                  * Else check the next larger page size.
3442                                  * hat_page_demote() may decrease p_szc
3443                                  * but that's ok we'll just take an extra
3444                                  * trip discover there're no larger mappings
3445                                  * and return.
3446                                  */
3447                                 ++pg_szcd;
3448                                 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3449                                 goto next_size;
3450                         }
3451 
3452                         /*
3453                          * If this mapping size matches, remove it.
3454                          */
3455                         level = ht->ht_level;
3456                         if (level == pg_szcd)
3457                                 break;
3458                 }
3459 
3460                 /*
3461                  * Remove the mapping list entry for this page.
3462                  * Note this does the x86_hm_exit() for us.
3463                  */
3464                 hm = hati_page_unmap(cur_pp, ht, entry);
3465                 if (hm != NULL)
3466                         hment_free(hm);
3467         }
3468 }
3469 
3470 int
3471 hat_pageunload(struct page *pp, uint_t forceflag)
3472 {
3473         ASSERT(PAGE_EXCL(pp));
3474         return (hati_pageunload(pp, 0, forceflag));
3475 }
3476 
3477 /*
3478  * Unload all large mappings to pp and reduce by 1 p_szc field of every large
3479  * page level that included pp.
3480  *
3481  * pp must be locked EXCL. Even though no other constituent pages are locked
3482  * it's legal to unload large mappings to pp because all constituent pages of
3483  * large locked mappings have to be locked SHARED.  therefore if we have EXCL
3484  * lock on one of constituent pages none of the large mappings to pp are
3485  * locked.
3486  *
3487  * Change (always decrease) p_szc field starting from the last constituent
3488  * page and ending with root constituent page so that root's pszc always shows
3489  * the area where hat_page_demote() may be active.
3490  *
3491  * This mechanism is only used for file system pages where it's not always
3492  * possible to get EXCL locks on all constituent pages to demote the size code
3493  * (as is done for anonymous or kernel large pages).
3494  */
3495 void
3496 hat_page_demote(page_t *pp)
3497 {
3498         uint_t          pszc;
3499         uint_t          rszc;
3500         uint_t          szc;
3501         page_t          *rootpp;
3502         page_t          *firstpp;
3503         page_t          *lastpp;
3504         pgcnt_t         pgcnt;
3505 
3506         ASSERT(PAGE_EXCL(pp));
3507         ASSERT(!PP_ISFREE(pp));
3508         ASSERT(page_szc_lock_assert(pp));
3509 
3510         if (pp->p_szc == 0)
3511                 return;
3512 
3513         rootpp = PP_GROUPLEADER(pp, 1);
3514         (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3515 
3516         /*
3517          * all large mappings to pp are gone
3518          * and no new can be setup since pp is locked exclusively.
3519          *
3520          * Lock the root to make sure there's only one hat_page_demote()
3521          * outstanding within the area of this root's pszc.
3522          *
3523          * Second potential hat_page_demote() is already eliminated by upper
3524          * VM layer via page_szc_lock() but we don't rely on it and use our
3525          * own locking (so that upper layer locking can be changed without
3526          * assumptions that hat depends on upper layer VM to prevent multiple
3527          * hat_page_demote() to be issued simultaneously to the same large
3528          * page).
3529          */
3530 again:
3531         pszc = pp->p_szc;
3532         if (pszc == 0)
3533                 return;
3534         rootpp = PP_GROUPLEADER(pp, pszc);
3535         x86_hm_enter(rootpp);
3536         /*
3537          * If root's p_szc is different from pszc we raced with another
3538          * hat_page_demote().  Drop the lock and try to find the root again.
3539          * If root's p_szc is greater than pszc previous hat_page_demote() is
3540          * not done yet.  Take and release mlist lock of root's root to wait
3541          * for previous hat_page_demote() to complete.
3542          */
3543         if ((rszc = rootpp->p_szc) != pszc) {
3544                 x86_hm_exit(rootpp);
3545                 if (rszc > pszc) {
3546                         /* p_szc of a locked non free page can't increase */
3547                         ASSERT(pp != rootpp);
3548 
3549                         rootpp = PP_GROUPLEADER(rootpp, rszc);
3550                         x86_hm_enter(rootpp);
3551                         x86_hm_exit(rootpp);
3552                 }
3553                 goto again;
3554         }
3555         ASSERT(pp->p_szc == pszc);
3556 
3557         /*
3558          * Decrement by 1 p_szc of every constituent page of a region that
3559          * covered pp. For example if original szc is 3 it gets changed to 2
3560          * everywhere except in region 2 that covered pp. Region 2 that
3561          * covered pp gets demoted to 1 everywhere except in region 1 that
3562          * covered pp. The region 1 that covered pp is demoted to region
3563          * 0. It's done this way because from region 3 we removed level 3
3564          * mappings, from region 2 that covered pp we removed level 2 mappings
3565          * and from region 1 that covered pp we removed level 1 mappings.  All
3566          * changes are done from from high pfn's to low pfn's so that roots
3567          * are changed last allowing one to know the largest region where
3568          * hat_page_demote() is stil active by only looking at the root page.
3569          *
3570          * This algorithm is implemented in 2 while loops. First loop changes
3571          * p_szc of pages to the right of pp's level 1 region and second
3572          * loop changes p_szc of pages of level 1 region that covers pp
3573          * and all pages to the left of level 1 region that covers pp.
3574          * In the first loop p_szc keeps dropping with every iteration
3575          * and in the second loop it keeps increasing with every iteration.
3576          *
3577          * First loop description: Demote pages to the right of pp outside of
3578          * level 1 region that covers pp.  In every iteration of the while
3579          * loop below find the last page of szc region and the first page of
3580          * (szc - 1) region that is immediately to the right of (szc - 1)
3581          * region that covers pp.  From last such page to first such page
3582          * change every page's szc to szc - 1. Decrement szc and continue
3583          * looping until szc is 1. If pp belongs to the last (szc - 1) region
3584          * of szc region skip to the next iteration.
3585          */
3586         szc = pszc;
3587         while (szc > 1) {
3588                 lastpp = PP_GROUPLEADER(pp, szc);
3589                 pgcnt = page_get_pagecnt(szc);
3590                 lastpp += pgcnt - 1;
3591                 firstpp = PP_GROUPLEADER(pp, (szc - 1));
3592                 pgcnt = page_get_pagecnt(szc - 1);
3593                 if (lastpp - firstpp < pgcnt) {
3594                         szc--;
3595                         continue;
3596                 }
3597                 firstpp += pgcnt;
3598                 while (lastpp != firstpp) {
3599                         ASSERT(lastpp->p_szc == pszc);
3600                         lastpp->p_szc = szc - 1;
3601                         lastpp--;
3602                 }
3603                 firstpp->p_szc = szc - 1;
3604                 szc--;
3605         }
3606 
3607         /*
3608          * Second loop description:
3609          * First iteration changes p_szc to 0 of every
3610          * page of level 1 region that covers pp.
3611          * Subsequent iterations find last page of szc region
3612          * immediately to the left of szc region that covered pp
3613          * and first page of (szc + 1) region that covers pp.
3614          * From last to first page change p_szc of every page to szc.
3615          * Increment szc and continue looping until szc is pszc.
3616          * If pp belongs to the fist szc region of (szc + 1) region
3617          * skip to the next iteration.
3618          *
3619          */
3620         szc = 0;
3621         while (szc < pszc) {
3622                 firstpp = PP_GROUPLEADER(pp, (szc + 1));
3623                 if (szc == 0) {
3624                         pgcnt = page_get_pagecnt(1);
3625                         lastpp = firstpp + (pgcnt - 1);
3626                 } else {
3627                         lastpp = PP_GROUPLEADER(pp, szc);
3628                         if (firstpp == lastpp) {
3629                                 szc++;
3630                                 continue;
3631                         }
3632                         lastpp--;
3633                         pgcnt = page_get_pagecnt(szc);
3634                 }
3635                 while (lastpp != firstpp) {
3636                         ASSERT(lastpp->p_szc == pszc);
3637                         lastpp->p_szc = szc;
3638                         lastpp--;
3639                 }
3640                 firstpp->p_szc = szc;
3641                 if (firstpp == rootpp)
3642                         break;
3643                 szc++;
3644         }
3645         x86_hm_exit(rootpp);
3646 }
3647 
3648 /*
3649  * get hw stats from hardware into page struct and reset hw stats
3650  * returns attributes of page
3651  * Flags for hat_pagesync, hat_getstat, hat_sync
3652  *
3653  * define       HAT_SYNC_ZERORM         0x01
3654  *
3655  * Additional flags for hat_pagesync
3656  *
3657  * define       HAT_SYNC_STOPON_REF     0x02
3658  * define       HAT_SYNC_STOPON_MOD     0x04
3659  * define       HAT_SYNC_STOPON_RM      0x06
3660  * define       HAT_SYNC_STOPON_SHARED  0x08
3661  */
3662 uint_t
3663 hat_pagesync(struct page *pp, uint_t flags)
3664 {
3665         hment_t         *hm = NULL;
3666         htable_t        *ht;
3667         uint_t          entry;
3668         x86pte_t        old, save_old;
3669         x86pte_t        new;
3670         uchar_t         nrmbits = P_REF|P_MOD|P_RO;
3671         extern ulong_t  po_share;
3672         page_t          *save_pp = pp;
3673         uint_t          pszc = 0;
3674 
3675         ASSERT(PAGE_LOCKED(pp) || panicstr);
3676 
3677         if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
3678                 return (pp->p_nrm & nrmbits);
3679 
3680         if ((flags & HAT_SYNC_ZERORM) == 0) {
3681 
3682                 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
3683                         return (pp->p_nrm & nrmbits);
3684 
3685                 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
3686                         return (pp->p_nrm & nrmbits);
3687 
3688                 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
3689                     hat_page_getshare(pp) > po_share) {
3690                         if (PP_ISRO(pp))
3691                                 PP_SETREF(pp);
3692                         return (pp->p_nrm & nrmbits);
3693                 }
3694         }
3695 
3696         XPV_DISALLOW_MIGRATE();
3697 next_size:
3698         /*
3699          * walk thru the mapping list syncing (and clearing) ref/mod bits.
3700          */
3701         x86_hm_enter(pp);
3702         while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
3703                 if (ht->ht_level < pszc)
3704                         continue;
3705                 old = x86pte_get(ht, entry);
3706 try_again:
3707 
3708                 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
3709 
3710                 if (PTE_GET(old, PT_REF | PT_MOD) == 0)
3711                         continue;
3712 
3713                 save_old = old;
3714                 if ((flags & HAT_SYNC_ZERORM) != 0) {
3715 
3716                         /*
3717                          * Need to clear ref or mod bits. Need to demap
3718                          * to make sure any executing TLBs see cleared bits.
3719                          */
3720                         new = old;
3721                         PTE_CLR(new, PT_REF | PT_MOD);
3722                         old = hati_update_pte(ht, entry, old, new);
3723                         if (old != 0)
3724                                 goto try_again;
3725 
3726                         old = save_old;
3727                 }
3728 
3729                 /*
3730                  * Sync the PTE
3731                  */
3732                 if (!(flags & HAT_SYNC_ZERORM) &&
3733                     PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
3734                         hati_sync_pte_to_page(pp, old, ht->ht_level);
3735 
3736                 /*
3737                  * can stop short if we found a ref'd or mod'd page
3738                  */
3739                 if ((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp) ||
3740                     (flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp)) {
3741                         x86_hm_exit(pp);
3742                         goto done;
3743                 }
3744         }
3745         x86_hm_exit(pp);
3746         while (pszc < pp->p_szc) {
3747                 page_t *tpp;
3748                 pszc++;
3749                 tpp = PP_GROUPLEADER(pp, pszc);
3750                 if (pp != tpp) {
3751                         pp = tpp;
3752                         goto next_size;
3753                 }
3754         }
3755 done:
3756         XPV_ALLOW_MIGRATE();
3757         return (save_pp->p_nrm & nrmbits);
3758 }
3759 
3760 /*
3761  * returns approx number of mappings to this pp.  A return of 0 implies
3762  * there are no mappings to the page.
3763  */
3764 ulong_t
3765 hat_page_getshare(page_t *pp)
3766 {
3767         uint_t cnt;
3768         cnt = hment_mapcnt(pp);
3769 #if defined(__amd64)
3770         if (vpm_enable && pp->p_vpmref) {
3771                 cnt += 1;
3772         }
3773 #endif
3774         return (cnt);
3775 }
3776 
3777 /*
3778  * Return 1 the number of mappings exceeds sh_thresh. Return 0
3779  * otherwise.
3780  */
3781 int
3782 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
3783 {
3784         return (hat_page_getshare(pp) > sh_thresh);
3785 }
3786 
3787 /*
3788  * hat_softlock isn't supported anymore
3789  */
3790 /*ARGSUSED*/
3791 faultcode_t
3792 hat_softlock(
3793         hat_t *hat,
3794         caddr_t addr,
3795         size_t *len,
3796         struct page **page_array,
3797         uint_t flags)
3798 {
3799         return (FC_NOSUPPORT);
3800 }
3801 
3802 
3803 
3804 /*
3805  * Routine to expose supported HAT features to platform independent code.
3806  */
3807 /*ARGSUSED*/
3808 int
3809 hat_supported(enum hat_features feature, void *arg)
3810 {
3811         switch (feature) {
3812 
3813         case HAT_SHARED_PT:     /* this is really ISM */
3814                 return (1);
3815 
3816         case HAT_DYNAMIC_ISM_UNMAP:
3817                 return (0);
3818 
3819         case HAT_VMODSORT:
3820                 return (1);
3821 
3822         case HAT_SHARED_REGIONS:
3823                 return (0);
3824 
3825         default:
3826                 panic("hat_supported() - unknown feature");
3827         }
3828         return (0);
3829 }
3830 
3831 /*
3832  * Called when a thread is exiting and has been switched to the kernel AS
3833  */
3834 void
3835 hat_thread_exit(kthread_t *thd)
3836 {
3837         ASSERT(thd->t_procp->p_as == &kas);
3838         XPV_DISALLOW_MIGRATE();
3839         hat_switch(thd->t_procp->p_as->a_hat);
3840         XPV_ALLOW_MIGRATE();
3841 }
3842 
3843 /*
3844  * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
3845  */
3846 /*ARGSUSED*/
3847 void
3848 hat_setup(hat_t *hat, int flags)
3849 {
3850         XPV_DISALLOW_MIGRATE();
3851         kpreempt_disable();
3852 
3853         hat_switch(hat);
3854 
3855         kpreempt_enable();
3856         XPV_ALLOW_MIGRATE();
3857 }
3858 
3859 /*
3860  * Prepare for a CPU private mapping for the given address.
3861  *
3862  * The address can only be used from a single CPU and can be remapped
3863  * using hat_mempte_remap().  Return the address of the PTE.
3864  *
3865  * We do the htable_create() if necessary and increment the valid count so
3866  * the htable can't disappear.  We also hat_devload() the page table into
3867  * kernel so that the PTE is quickly accessed.
3868  */
3869 hat_mempte_t
3870 hat_mempte_setup(caddr_t addr)
3871 {
3872         uintptr_t       va = (uintptr_t)addr;
3873         htable_t        *ht;
3874         uint_t          entry;
3875         x86pte_t        oldpte;
3876         hat_mempte_t    p;
3877 
3878         ASSERT(IS_PAGEALIGNED(va));
3879         ASSERT(!IN_VA_HOLE(va));
3880         ++curthread->t_hatdepth;
3881         XPV_DISALLOW_MIGRATE();
3882         ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
3883         if (ht == NULL) {
3884                 ht = htable_create(kas.a_hat, va, 0, NULL);
3885                 entry = htable_va2entry(va, ht);
3886                 ASSERT(ht->ht_level == 0);
3887                 oldpte = x86pte_get(ht, entry);
3888         }
3889         if (PTE_ISVALID(oldpte))
3890                 panic("hat_mempte_setup(): address already mapped"
3891                     "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
3892 
3893         /*
3894          * increment ht_valid_cnt so that the pagetable can't disappear
3895          */
3896         HTABLE_INC(ht->ht_valid_cnt);
3897 
3898         /*
3899          * return the PTE physical address to the caller.
3900          */
3901         htable_release(ht);
3902         XPV_ALLOW_MIGRATE();
3903         p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
3904         --curthread->t_hatdepth;
3905         return (p);
3906 }
3907 
3908 /*
3909  * Release a CPU private mapping for the given address.
3910  * We decrement the htable valid count so it might be destroyed.
3911  */
3912 /*ARGSUSED1*/
3913 void
3914 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
3915 {
3916         htable_t        *ht;
3917 
3918         XPV_DISALLOW_MIGRATE();
3919         /*
3920          * invalidate any left over mapping and decrement the htable valid count
3921          */
3922 #ifdef __xpv
3923         if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
3924             UVMF_INVLPG | UVMF_LOCAL))
3925                 panic("HYPERVISOR_update_va_mapping() failed");
3926 #else
3927         {
3928                 x86pte_t *pteptr;
3929 
3930                 pteptr = x86pte_mapin(mmu_btop(pte_pa),
3931                     (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3932                 if (mmu.pae_hat)
3933                         *pteptr = 0;
3934                 else
3935                         *(x86pte32_t *)pteptr = 0;
3936                 mmu_tlbflush_entry(addr);
3937                 x86pte_mapout();
3938         }
3939 #endif
3940 
3941         ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
3942         if (ht == NULL)
3943                 panic("hat_mempte_release(): invalid address");
3944         ASSERT(ht->ht_level == 0);
3945         HTABLE_DEC(ht->ht_valid_cnt);
3946         htable_release(ht);
3947         XPV_ALLOW_MIGRATE();
3948 }
3949 
3950 /*
3951  * Apply a temporary CPU private mapping to a page. We flush the TLB only
3952  * on this CPU, so this ought to have been called with preemption disabled.
3953  */
3954 void
3955 hat_mempte_remap(
3956         pfn_t           pfn,
3957         caddr_t         addr,
3958         hat_mempte_t    pte_pa,
3959         uint_t          attr,
3960         uint_t          flags)
3961 {
3962         uintptr_t       va = (uintptr_t)addr;
3963         x86pte_t        pte;
3964 
3965         /*
3966          * Remap the given PTE to the new page's PFN. Invalidate only
3967          * on this CPU.
3968          */
3969 #ifdef DEBUG
3970         htable_t        *ht;
3971         uint_t          entry;
3972 
3973         ASSERT(IS_PAGEALIGNED(va));
3974         ASSERT(!IN_VA_HOLE(va));
3975         ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
3976         ASSERT(ht != NULL);
3977         ASSERT(ht->ht_level == 0);
3978         ASSERT(ht->ht_valid_cnt > 0);
3979         ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
3980         htable_release(ht);
3981 #endif
3982         XPV_DISALLOW_MIGRATE();
3983         pte = hati_mkpte(pfn, attr, 0, flags);
3984 #ifdef __xpv
3985         if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
3986                 panic("HYPERVISOR_update_va_mapping() failed");
3987 #else
3988         {
3989                 x86pte_t *pteptr;
3990 
3991                 pteptr = x86pte_mapin(mmu_btop(pte_pa),
3992                     (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
3993                 if (mmu.pae_hat)
3994                         *(x86pte_t *)pteptr = pte;
3995                 else
3996                         *(x86pte32_t *)pteptr = (x86pte32_t)pte;
3997                 mmu_tlbflush_entry(addr);
3998                 x86pte_mapout();
3999         }
4000 #endif
4001         XPV_ALLOW_MIGRATE();
4002 }
4003 
4004 
4005 
4006 /*
4007  * Hat locking functions
4008  * XXX - these two functions are currently being used by hatstats
4009  *      they can be removed by using a per-as mutex for hatstats.
4010  */
4011 void
4012 hat_enter(hat_t *hat)
4013 {
4014         mutex_enter(&hat->hat_mutex);
4015 }
4016 
4017 void
4018 hat_exit(hat_t *hat)
4019 {
4020         mutex_exit(&hat->hat_mutex);
4021 }
4022 
4023 /*
4024  * HAT part of cpu initialization.
4025  */
4026 void
4027 hat_cpu_online(struct cpu *cpup)
4028 {
4029         if (cpup != CPU) {
4030                 x86pte_cpu_init(cpup);
4031                 hat_vlp_setup(cpup);
4032         }
4033         CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
4034 }
4035 
4036 /*
4037  * HAT part of cpu deletion.
4038  * (currently, we only call this after the cpu is safely passivated.)
4039  */
4040 void
4041 hat_cpu_offline(struct cpu *cpup)
4042 {
4043         ASSERT(cpup != CPU);
4044 
4045         CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
4046         hat_vlp_teardown(cpup);
4047         x86pte_cpu_fini(cpup);
4048 }
4049 
4050 /*
4051  * Function called after all CPUs are brought online.
4052  * Used to remove low address boot mappings.
4053  */
4054 void
4055 clear_boot_mappings(uintptr_t low, uintptr_t high)
4056 {
4057         uintptr_t vaddr = low;
4058         htable_t *ht = NULL;
4059         level_t level;
4060         uint_t entry;
4061         x86pte_t pte;
4062 
4063         /*
4064          * On 1st CPU we can unload the prom mappings, basically we blow away
4065          * all virtual mappings under _userlimit.
4066          */
4067         while (vaddr < high) {
4068                 pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
4069                 if (ht == NULL)
4070                         break;
4071 
4072                 level = ht->ht_level;
4073                 entry = htable_va2entry(vaddr, ht);
4074                 ASSERT(level <= mmu.max_page_level);
4075                 ASSERT(PTE_ISPAGE(pte, level));
4076 
4077                 /*
4078                  * Unload the mapping from the page tables.
4079                  */
4080                 (void) x86pte_inval(ht, entry, 0, NULL, B_TRUE);
4081                 ASSERT(ht->ht_valid_cnt > 0);
4082                 HTABLE_DEC(ht->ht_valid_cnt);
4083                 PGCNT_DEC(ht->ht_hat, ht->ht_level);
4084 
4085                 vaddr += LEVEL_SIZE(ht->ht_level);
4086         }
4087         if (ht)
4088                 htable_release(ht);
4089 }
4090 
4091 /*
4092  * Atomically update a new translation for a single page.  If the
4093  * currently installed PTE doesn't match the value we expect to find,
4094  * it's not updated and we return the PTE we found.
4095  *
4096  * If activating nosync or NOWRITE and the page was modified we need to sync
4097  * with the page_t. Also sync with page_t if clearing ref/mod bits.
4098  */
4099 static x86pte_t
4100 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
4101 {
4102         page_t          *pp;
4103         uint_t          rm = 0;
4104         x86pte_t        replaced;
4105 
4106         if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
4107             PTE_GET(expected, PT_MOD | PT_REF) &&
4108             (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
4109             !PTE_GET(new, PT_MOD | PT_REF))) {
4110 
4111                 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
4112                 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
4113                 ASSERT(pp != NULL);
4114                 if (PTE_GET(expected, PT_MOD))
4115                         rm |= P_MOD;
4116                 if (PTE_GET(expected, PT_REF))
4117                         rm |= P_REF;
4118                 PTE_CLR(new, PT_MOD | PT_REF);
4119         }
4120 
4121         replaced = x86pte_update(ht, entry, expected, new);
4122         if (replaced != expected)
4123                 return (replaced);
4124 
4125         if (rm) {
4126                 /*
4127                  * sync to all constituent pages of a large page
4128                  */
4129                 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
4130                 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
4131                 while (pgcnt-- > 0) {
4132                         /*
4133                          * hat_page_demote() can't decrease
4134                          * pszc below this mapping size
4135                          * since large mapping existed after we
4136                          * took mlist lock.
4137                          */
4138                         ASSERT(pp->p_szc >= ht->ht_level);
4139                         hat_page_setattr(pp, rm);
4140                         ++pp;
4141                 }
4142         }
4143 
4144         return (0);
4145 }
4146 
4147 /* ARGSUSED */
4148 void
4149 hat_join_srd(struct hat *hat, vnode_t *evp)
4150 {
4151 }
4152 
4153 /* ARGSUSED */
4154 hat_region_cookie_t
4155 hat_join_region(struct hat *hat,
4156     caddr_t r_saddr,
4157     size_t r_size,
4158     void *r_obj,
4159     u_offset_t r_objoff,
4160     uchar_t r_perm,
4161     uchar_t r_pgszc,
4162     hat_rgn_cb_func_t r_cb_function,
4163     uint_t flags)
4164 {
4165         panic("No shared region support on x86");
4166         return (HAT_INVALID_REGION_COOKIE);
4167 }
4168 
4169 /* ARGSUSED */
4170 void
4171 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4172 {
4173         panic("No shared region support on x86");
4174 }
4175 
4176 /* ARGSUSED */
4177 void
4178 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4179 {
4180         panic("No shared region support on x86");
4181 }
4182 
4183 
4184 /*
4185  * Kernel Physical Mapping (kpm) facility
4186  *
4187  * Most of the routines needed to support segkpm are almost no-ops on the
4188  * x86 platform.  We map in the entire segment when it is created and leave
4189  * it mapped in, so there is no additional work required to set up and tear
4190  * down individual mappings.  All of these routines were created to support
4191  * SPARC platforms that have to avoid aliasing in their virtually indexed
4192  * caches.
4193  *
4194  * Most of the routines have sanity checks in them (e.g. verifying that the
4195  * passed-in page is locked).  We don't actually care about most of these
4196  * checks on x86, but we leave them in place to identify problems in the
4197  * upper levels.
4198  */
4199 
4200 /*
4201  * Map in a locked page and return the vaddr.
4202  */
4203 /*ARGSUSED*/
4204 caddr_t
4205 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
4206 {
4207         caddr_t         vaddr;
4208 
4209 #ifdef DEBUG
4210         if (kpm_enable == 0) {
4211                 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
4212                 return ((caddr_t)NULL);
4213         }
4214 
4215         if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4216                 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
4217                 return ((caddr_t)NULL);
4218         }
4219 #endif
4220 
4221         vaddr = hat_kpm_page2va(pp, 1);
4222 
4223         return (vaddr);
4224 }
4225 
4226 /*
4227  * Mapout a locked page.
4228  */
4229 /*ARGSUSED*/
4230 void
4231 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
4232 {
4233 #ifdef DEBUG
4234         if (kpm_enable == 0) {
4235                 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
4236                 return;
4237         }
4238 
4239         if (IS_KPM_ADDR(vaddr) == 0) {
4240                 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
4241                 return;
4242         }
4243 
4244         if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4245                 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
4246                 return;
4247         }
4248 #endif
4249 }
4250 
4251 /*
4252  * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
4253  * memory addresses that are not described by a page_t.  It can
4254  * also be used for normal pages that are not locked, but beware
4255  * this is dangerous - no locking is performed, so the identity of
4256  * the page could change.  hat_kpm_mapin_pfn is not supported when
4257  * vac_colors > 1, because the chosen va depends on the page identity,
4258  * which could change.
4259  * The caller must only pass pfn's for valid physical addresses; violation
4260  * of this rule will cause panic.
4261  */
4262 caddr_t
4263 hat_kpm_mapin_pfn(pfn_t pfn)
4264 {
4265         caddr_t paddr, vaddr;
4266 
4267         if (kpm_enable == 0)
4268                 return ((caddr_t)NULL);
4269 
4270         paddr = (caddr_t)ptob(pfn);
4271         vaddr = (uintptr_t)kpm_vbase + paddr;
4272 
4273         return ((caddr_t)vaddr);
4274 }
4275 
4276 /*ARGSUSED*/
4277 void
4278 hat_kpm_mapout_pfn(pfn_t pfn)
4279 {
4280         /* empty */
4281 }
4282 
4283 /*
4284  * Return the kpm virtual address for a specific pfn
4285  */
4286 caddr_t
4287 hat_kpm_pfn2va(pfn_t pfn)
4288 {
4289         uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
4290 
4291         ASSERT(!pfn_is_foreign(pfn));
4292         return ((caddr_t)vaddr);
4293 }
4294 
4295 /*
4296  * Return the kpm virtual address for the page at pp.
4297  */
4298 /*ARGSUSED*/
4299 caddr_t
4300 hat_kpm_page2va(struct page *pp, int checkswap)
4301 {
4302         return (hat_kpm_pfn2va(pp->p_pagenum));
4303 }
4304 
4305 /*
4306  * Return the page frame number for the kpm virtual address vaddr.
4307  */
4308 pfn_t
4309 hat_kpm_va2pfn(caddr_t vaddr)
4310 {
4311         pfn_t           pfn;
4312 
4313         ASSERT(IS_KPM_ADDR(vaddr));
4314 
4315         pfn = (pfn_t)btop(vaddr - kpm_vbase);
4316 
4317         return (pfn);
4318 }
4319 
4320 
4321 /*
4322  * Return the page for the kpm virtual address vaddr.
4323  */
4324 page_t *
4325 hat_kpm_vaddr2page(caddr_t vaddr)
4326 {
4327         pfn_t           pfn;
4328 
4329         ASSERT(IS_KPM_ADDR(vaddr));
4330 
4331         pfn = hat_kpm_va2pfn(vaddr);
4332 
4333         return (page_numtopp_nolock(pfn));
4334 }
4335 
4336 /*
4337  * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4338  * KPM page.  This should never happen on x86
4339  */
4340 int
4341 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4342 {
4343         panic("pagefault in seg_kpm.  hat: 0x%p  vaddr: 0x%p",
4344             (void *)hat, (void *)vaddr);
4345 
4346         return (0);
4347 }
4348 
4349 /*ARGSUSED*/
4350 void
4351 hat_kpm_mseghash_clear(int nentries)
4352 {}
4353 
4354 /*ARGSUSED*/
4355 void
4356 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4357 {}
4358 
4359 #ifndef __xpv
4360 void
4361 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
4362         offset_t kpm_pages_off)
4363 {
4364         _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
4365         pfn_t base, end;
4366 
4367         /*
4368          * kphysm_add_memory_dynamic() does not set nkpmpgs
4369          * when page_t memory is externally allocated.  That
4370          * code must properly calculate nkpmpgs in all cases
4371          * if nkpmpgs needs to be used at some point.
4372          */
4373 
4374         /*
4375          * The meta (page_t) pages for dynamically added memory are allocated
4376          * either from the incoming memory itself or from existing memory.
4377          * In the former case the base of the incoming pages will be different
4378          * than the base of the dynamic segment so call memseg_get_start() to
4379          * get the actual base of the incoming memory for each case.
4380          */
4381 
4382         base = memseg_get_start(msp);
4383         end = msp->pages_end;
4384 
4385         hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
4386             mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
4387             HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
4388 }
4389 
4390 void
4391 hat_kpm_addmem_mseg_insert(struct memseg *msp)
4392 {
4393         _NOTE(ARGUNUSED(msp));
4394 }
4395 
4396 void
4397 hat_kpm_addmem_memsegs_update(struct memseg *msp)
4398 {
4399         _NOTE(ARGUNUSED(msp));
4400 }
4401 
4402 /*
4403  * Return end of metadata for an already setup memseg.
4404  * X86 platforms don't need per-page meta data to support kpm.
4405  */
4406 caddr_t
4407 hat_kpm_mseg_reuse(struct memseg *msp)
4408 {
4409         return ((caddr_t)msp->epages);
4410 }
4411 
4412 void
4413 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
4414 {
4415         _NOTE(ARGUNUSED(msp, mspp));
4416         ASSERT(0);
4417 }
4418 
4419 void
4420 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
4421         struct memseg *lo, struct memseg *mid, struct memseg *hi)
4422 {
4423         _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
4424         ASSERT(0);
4425 }
4426 
4427 /*
4428  * Walk the memsegs chain, applying func to each memseg span.
4429  */
4430 void
4431 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
4432 {
4433         pfn_t   pbase, pend;
4434         void    *base;
4435         size_t  size;
4436         struct memseg *msp;
4437 
4438         for (msp = memsegs; msp; msp = msp->next) {
4439                 pbase = msp->pages_base;
4440                 pend = msp->pages_end;
4441                 base = ptob(pbase) + kpm_vbase;
4442                 size = ptob(pend - pbase);
4443                 func(arg, base, size);
4444         }
4445 }
4446 
4447 #else   /* __xpv */
4448 
4449 /*
4450  * There are specific Hypervisor calls to establish and remove mappings
4451  * to grant table references and the privcmd driver. We have to ensure
4452  * that a page table actually exists.
4453  */
4454 void
4455 hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
4456 {
4457         maddr_t base_ma;
4458         htable_t *ht;
4459         uint_t entry;
4460 
4461         ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4462         XPV_DISALLOW_MIGRATE();
4463         ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
4464 
4465         /*
4466          * if an address for pte_ma is passed in, return the MA of the pte
4467          * for this specific address.  This address is only valid as long
4468          * as the htable stays locked.
4469          */
4470         if (pte_ma != NULL) {
4471                 entry = htable_va2entry((uintptr_t)addr, ht);
4472                 base_ma = pa_to_ma(ptob(ht->ht_pfn));
4473                 *pte_ma = base_ma + (entry << mmu.pte_size_shift);
4474         }
4475         XPV_ALLOW_MIGRATE();
4476 }
4477 
4478 void
4479 hat_release_mapping(hat_t *hat, caddr_t addr)
4480 {
4481         htable_t *ht;
4482 
4483         ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4484         XPV_DISALLOW_MIGRATE();
4485         ht = htable_lookup(hat, (uintptr_t)addr, 0);
4486         ASSERT(ht != NULL);
4487         ASSERT(ht->ht_busy >= 2);
4488         htable_release(ht);
4489         htable_release(ht);
4490         XPV_ALLOW_MIGRATE();
4491 }
4492 #endif  /* __xpv */