/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/mman.h>
#include <sys/sunddi.h>
#include <sys/tnf_probe.h>
#include <vm/hat_sfmmu.h>
#include <vm/as.h>
#include <vm/xhat.h>
#include <vm/xhat_sfmmu.h>
#include <sys/zulu_hat.h>
#include <sys/zulumod.h>

/*
 * This file contains the implementation of zulu_hat: an XHAT provider
 * to support the MMU for the XVR-4000 graphics accelerator (code name zulu).
 *
 * The zulu hat is linked into the kernel misc module zuluvm.
 * zuluvm provides services that the zulu device driver module requires
 * but that are not part of the standard DDI. See PSARC 2002/231.
 *
 * The zulu driver is delivered by the graphics consolidation.
 * zuluvm is in the ON workspace.
 *
 * There are two types of interfaces provided by zulu_hat:
 *   1. The set of functions and data structures used by zuluvm to obtain
 *      tte entries for the zulu MMU and to manage the association between
 *      user processes' address spaces and zulu graphics contexts.
 *
 *   2. The entry points required for an XHAT provider: zulu_hat_ops
 */

/*
 * zulu_ctx_tab is an array of pointers to zulu_hats.
 *
 * During a zulu graphics context switch, the zulu MMU's current context
 * register is set to the index of the process's zulu hat in zulu_ctx_tab.
 *
 * This allows the TL=1 TLB miss handler to quickly find the zulu hat and
 * look up a tte in the zulu hat's TSB.
 *
 * To synchronize with the trap handler we use bit zero of
 * the pointer as a lock bit. See the function zulu_ctx_tsb_lock_enter().
 *
 * If the trap handler finds the ctx locked it doesn't wait; it
 * posts a soft interrupt which is handled at TL=0.
 */
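/*
 * Note: using bit zero of a zulu_ctx_tab entry as the lock bit relies on
 * struct zulu_hat pointers always having bit zero clear, which the kernel
 * memory allocator's alignment guarantees.
 */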

#define         ZULU_HAT_MAX_CTX 32
struct zulu_hat *zulu_ctx_tab[ZULU_HAT_MAX_CTX];

/*
 * To avoid searching through the whole zulu_ctx_tab for a free slot,
 * we maintain the value of zulu_ctx_search_start.
 *
 * This value is a guess as to where a free slot in the context table might be.
 * All slots < zulu_ctx_search_start are definitely occupied.
 */
static int zulu_ctx_search_start = 0;


/*
 * this mutex protects the zulu_ctx_tab and zulu_ctx_search_start
 */
static kmutex_t zulu_ctx_lock;


uint64_t        zulu_tsb_hit = 0;       /* assembly code increments this */
static uint64_t zulu_tsb_miss = 0;
static uint64_t zulu_as_fault = 0;

/*
 * The zulu device has two zulu data mmus.
 * We use the base pagesize for one of them and 4M for the other.
 */
extern int zuluvm_base_pgsize;



/*
 * call zuluvm to remove translations for a page
 */
static void
zulu_hat_demap_page(struct zulu_hat *zhat, caddr_t vaddr, int size)
{
        if (zhat->zulu_ctx < 0) {
                /* context has been stolen, so page is already demapped */
                return;
        }
        zuluvm_demap_page(zhat->zdev, NULL, zhat->zulu_ctx, vaddr, size);
}

static void
zulu_hat_demap_ctx(void *zdev, int zulu_ctx)
{
        if (zulu_ctx < 0) {
                /* context has been stolen */
                return;
        }
        zuluvm_demap_ctx(zdev, zulu_ctx);
}


/*
 * steal the least recently used context slot.
 */
static int
zulu_hat_steal_ctx()
{
        int             ctx;
        hrtime_t        delta = INT64_MAX;
        struct zulu_hat *zhat_oldest = NULL;
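        /* delta tracks the smallest (i.e. oldest) last_used time seen so far */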

        ASSERT(mutex_owned(&zulu_ctx_lock));

        for (ctx = 0; ctx < ZULU_HAT_MAX_CTX; ctx++) {
                struct zulu_hat *zhat = ZULU_CTX_GET_HAT(ctx);

                /*
                 * we shouldn't be here unless all slots are occupied
                 */
                ASSERT(zhat != NULL);

                TNF_PROBE_3(steal_ctx_loop, "zulu_hat", /* CSTYLED */,
                    tnf_int, ctx, ctx,
                    tnf_long, last_used, zhat->last_used,
                    tnf_long, oldest, delta);

                if (zhat->last_used < delta) {
                        zhat_oldest = zhat;
                        delta = zhat->last_used;
                }
        }

        ASSERT(zhat_oldest != NULL);

        mutex_enter(&zhat_oldest->lock);

        /* Nobody should have the tsb lock bit set here */
        ASSERT(((uint64_t)zulu_ctx_tab[zhat_oldest->zulu_ctx] & ZULU_CTX_LOCK)
            == 0);

        ctx = zhat_oldest->zulu_ctx;
        zhat_oldest->zulu_ctx = -1;

        ZULU_CTX_SET_HAT(ctx, NULL);

        zulu_hat_demap_ctx(zhat_oldest->zdev, ctx);

        mutex_exit(&zhat_oldest->lock);

        TNF_PROBE_1(zulu_hat_steal_ctx, "zulu_hat", /* CSTYLED */,
            tnf_int, ctx, ctx);

        return (ctx);
}

/*
 * find a slot in the context table for a zulu_hat
 */
static void
zulu_hat_ctx_alloc(struct zulu_hat *zhat)
{
        int             ctx;

        mutex_enter(&zulu_ctx_lock);

        for (ctx = zulu_ctx_search_start; ctx < ZULU_HAT_MAX_CTX; ctx++) {
                if (ZULU_CTX_IS_FREE(ctx)) {
                        zulu_ctx_search_start = ctx + 1;
                        break;
                }
        }

        if (ctx == ZULU_HAT_MAX_CTX) {
                /* table is full; need to steal an entry */
                zulu_ctx_search_start = ZULU_HAT_MAX_CTX;
                ctx = zulu_hat_steal_ctx();
        }

        mutex_enter(&zhat->lock);

        ZULU_CTX_SET_HAT(ctx, zhat);
        zhat->zulu_ctx = ctx;

        mutex_exit(&zhat->lock);

        mutex_exit(&zulu_ctx_lock);

        TNF_PROBE_2(zulu_hat_ctx_alloc, "zulu_hat", /* CSTYLED */,
            tnf_opaque, zhat, zhat, tnf_int, ctx, ctx);
}

/*
 * zulu_hat_validate_ctx: Called before the graphics context associated
 * with a given zulu hat becomes the current zulu graphics context.
 * Make sure that the hat has a slot in zulu_ctx_tab.
 */
void
zulu_hat_validate_ctx(struct zulu_hat *zhat)
{
        if (zhat->zulu_ctx < 0) {
                zulu_hat_ctx_alloc(zhat);
        }
        zhat->last_used = gethrtime();
}


static void
zulu_hat_ctx_free(struct zulu_hat *zhat)
{
        TNF_PROBE_1(zulu_hat_ctx_free, "zulu_hat", /* CSTYLED */,
            tnf_int, ctx, zhat->zulu_ctx);

        mutex_enter(&zulu_ctx_lock);

        mutex_enter(&zhat->lock);
        if (zhat->zulu_ctx >= 0) {
                ZULU_CTX_SET_HAT(zhat->zulu_ctx, NULL);

                if (zulu_ctx_search_start > zhat->zulu_ctx) {
                        zulu_ctx_search_start = zhat->zulu_ctx;
                }
        }
        mutex_exit(&zhat->lock);
        mutex_exit(&zulu_ctx_lock);
}

/*
 * Lock the zulu tsb for a given zulu_hat.
 *
 * We're just protecting against the TLB trap handler here. Other operations
 * on the zulu_hat require entering the zhat's lock.
 */
static void
zulu_ctx_tsb_lock_enter(struct zulu_hat *zhat)
{
        uint64_t        lck;
        uint64_t        *plck;

        ASSERT(mutex_owned(&zhat->lock));

        if (zhat->zulu_ctx < 0) {
                return;
        }
        plck = (uint64_t *)&zulu_ctx_tab[zhat->zulu_ctx];

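        /*
         * Spin until the lock bit is set atomically with compare-and-swap.
         * The TL=1 trap handler never spins on this bit; if it finds the
         * context locked it posts a soft interrupt instead.
         */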
        for (;;) {
                lck = *plck;
                if (!(lck & ZULU_CTX_LOCK)) {
                        uint64_t old_lck, new_lck;

                        new_lck = lck | ZULU_CTX_LOCK;

                        old_lck = cas64(plck, lck, new_lck);

                        if (old_lck == lck) {
                                /*
                                 * success
                                 */
                                break;
                        }
                }
        }
}

static void
zulu_ctx_tsb_lock_exit(struct zulu_hat *zhat)
{
        uint64_t        lck;
        int             zulu_ctx = zhat->zulu_ctx;

        if (zulu_ctx < 0) {
                return;
        }
        lck = (uint64_t)zulu_ctx_tab[zulu_ctx];
        ASSERT(lck & ZULU_CTX_LOCK);
        lck &= ~ZULU_CTX_LOCK;
        zulu_ctx_tab[zulu_ctx] = (struct zulu_hat *)lck;
}

/*
 * Each zulu hat has a "shadow tree", which is a table of the 4MB address
 * regions for which the zhat has mappings.
 *
 * This table is maintained in an avl tree.
 * Nodes in the tree are called shadow blocks (or sblks).
 *
 * This data structure makes unload operations by (address, range)
 * much more efficient.
 *
 * We get called a lot for address ranges that have never been supplied
 * to zulu.
 */
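/*
 * Each shadow block records its 4MB-aligned base address (ivaddr), a
 * reference count of the zblks that fall within it, and min_addr/max_addr
 * watermarks that bound the range actually covered by those mappings.
 */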

/*
 * compare the base addresses of two nodes in the shadow tree
 */
static int
zulu_shadow_tree_compare(const void *a, const void *b)
{
        struct zulu_shadow_blk *zba = (struct zulu_shadow_blk *)a;
        struct zulu_shadow_blk *zbb = (struct zulu_shadow_blk *)b;
        uint64_t                addr_a = zba->ivaddr;
        uint64_t                addr_b = zbb->ivaddr;

        TNF_PROBE_2(zulu_shadow_tree_compare, "zulu_shadow_tree", /* CSTYLED */,
            tnf_opaque, addr_a, addr_a, tnf_opaque, addr_b, addr_b);

        if (addr_a < addr_b) {
                return (-1);
        } else if (addr_a > addr_b) {
                return (1);
        } else {
                return (0);
        }
}

/*
 * look up the entry in the shadow tree for a given virtual address
 */
static struct zulu_shadow_blk *
zulu_shadow_tree_lookup(struct zulu_hat *zhat, uint64_t ivaddr,
        avl_index_t *where)
{
        struct zulu_shadow_blk proto;
        struct zulu_shadow_blk *sblk;

        proto.ivaddr = ivaddr & ZULU_SHADOW_BLK_MASK;

        /*
         * Pages typically fault in order, so we cache the last shadow
         * block that was referenced; this usually lets us avoid a call
         * to avl_find.
         */
        if ((zhat->sblk_last != NULL) &&
            (proto.ivaddr == zhat->sblk_last->ivaddr)) {
                sblk = zhat->sblk_last;
        } else {
                sblk = (struct zulu_shadow_blk *)avl_find(&zhat->shadow_tree,
                    &proto, where);
                zhat->sblk_last = sblk;
        }

        TNF_PROBE_2(zulu_shadow_tree_lookup, "zulu_shadow_tree", /* CSTYLED */,
            tnf_opaque, ivaddr, proto.ivaddr,
            tnf_opaque, where, where ? *where : ~0);

        return (sblk);
}

/*
 * insert an sblk into the shadow tree for a given zblk.
 * If an sblk already exists, just increment its refcount.
 */
static void
zulu_shadow_tree_insert(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
{
        avl_index_t             where;
        struct zulu_shadow_blk  *sblk  = NULL;
        uint64_t                ivaddr;
        uint64_t                end;

        ivaddr = zblk->zulu_hat_blk_vaddr & ZULU_SHADOW_BLK_MASK;

        end = zblk->zulu_hat_blk_vaddr + ZULU_HAT_PGSZ(zblk->zulu_hat_blk_size);

        sblk = zulu_shadow_tree_lookup(zhat, ivaddr, &where);
        if (sblk != NULL) {
                sblk->ref_count++;

                if (zblk->zulu_hat_blk_vaddr < sblk->min_addr) {
                        sblk->min_addr = zblk->zulu_hat_blk_vaddr;
                }
                /*
                 * a blk can set both the minimum and maximum when it
                 * is the first zblk added to a previously emptied sblk
                 */
                if (end > sblk->max_addr) {
                        sblk->max_addr = end;
                }
        } else {
                sblk = kmem_zalloc(sizeof (*sblk), KM_SLEEP);
                sblk->ref_count = 1;
                sblk->ivaddr = ivaddr;
                sblk->min_addr = zblk->zulu_hat_blk_vaddr;
                sblk->max_addr = end;
                zhat->sblk_last = sblk;

                avl_insert(&zhat->shadow_tree, sblk, where);
        }
        zblk->zulu_shadow_blk = sblk;
        TNF_PROBE_2(zulu_shadow_tree_insert, "zulu_shadow_tree", /* CSTYLED */,
            tnf_opaque, vaddr, ivaddr,
            tnf_opaque, ref_count, sblk->ref_count);
}

/*
 * Decrement the ref_count of the sblk that corresponds to a given zblk.
 * When the ref_count goes to zero, reset the sblk's watermarks; the emptied
 * sblk stays in the tree until the tree is destroyed.
 */
static void
zulu_shadow_tree_delete(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
{
        struct zulu_shadow_blk  *sblk;

        ASSERT(zblk->zulu_shadow_blk != NULL);

        sblk = zblk->zulu_shadow_blk;

        TNF_PROBE_2(zulu_shadow_tree_delete, "zulu_shadow_tree", /* CSTYLED */,
            tnf_opaque, vaddr, sblk->ivaddr,
            tnf_opaque, ref_count, sblk->ref_count-1);

        if (--sblk->ref_count == 0) {
                if (zhat->sblk_last == sblk) {
                        zhat->sblk_last = NULL;
                }
                sblk->min_addr = sblk->ivaddr + ZULU_SHADOW_BLK_RANGE;
                sblk->max_addr = sblk->ivaddr;
        } else {
                /*
                 * Update the high and low water marks for this sblk.
                 * These are estimates, because we don't know if the previous
                 * or next region is actually occupied, but we can tell
                 * whether the previous values have become invalid.
                 *
                 * In the most often applied case a segment is being
                 * unloaded, and the min_addr will be kept up to date as
                 * the zblks are deleted in order.
                 */
                uint64_t end = zblk->zulu_hat_blk_vaddr +
                    ZULU_HAT_PGSZ(zblk->zulu_hat_blk_size);

                if (zblk->zulu_hat_blk_vaddr == sblk->min_addr) {
                        sblk->min_addr = end;
                }
                if (end == sblk->max_addr) {
                        sblk->max_addr = zblk->zulu_hat_blk_vaddr;
                }
        }

        zblk->zulu_shadow_blk = NULL;
}

static void
zulu_shadow_tree_destroy(struct zulu_hat *zhat)
{
        struct zulu_shadow_blk *sblk;
        void    *cookie = NULL;

        while ((sblk = (struct zulu_shadow_blk *)avl_destroy_nodes(
            &zhat->shadow_tree, &cookie)) != NULL) {
                TNF_PROBE_2(shadow_tree_destroy, "zulu_hat", /* CSTYLED */,
                    tnf_opaque, vaddr, sblk->ivaddr,
                    tnf_opaque, ref_count, sblk->ref_count);
                kmem_free(sblk, sizeof (*sblk));
        }
        avl_destroy(&zhat->shadow_tree);
}

/*
 * zulu_hat_insert_map:
 *
 * Add a zulu_hat_blk to a zhat's mappings.
 *
 * Several data structures are used:
 *      tsb: for simple fast lookups by the trap handler
 *      hash table: for efficient lookups by (address, range)
 *      a shadow tree of 4MB ranges with mappings, for unloading big regions
 */
static void
zulu_hat_insert_map(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
{
        int tsb_hash;

        tsb_hash = ZULU_TSB_HASH(zblk->zulu_hat_blk_vaddr,
            zblk->zulu_hat_blk_size, zhat->zulu_tsb_size);

        TNF_PROBE_3(zulu_hat_insert_map, "zulu_hat", /* CSTYLED */,
            tnf_opaque, zblkp, zblk,
            tnf_opaque, vaddr, zblk->zulu_hat_blk_vaddr,
            tnf_opaque, hash, tsb_hash);

        ASSERT(tsb_hash < zhat->zulu_tsb_size);

        zulu_shadow_tree_insert(zhat, zblk);

        /*
         * The hash table is an array of buckets. Each bucket is the head
         * of a linked list of mappings whose addresses hash to the bucket.
         * New entries go to the head of the list.
         */
        zblk->zulu_hash_prev = NULL;
        zblk->zulu_hash_next = ZULU_MAP_HASH_HEAD(zhat,
            zblk->zulu_hat_blk_vaddr, zblk->zulu_hat_blk_size);
        if (zblk->zulu_hash_next) {
                zblk->zulu_hash_next->zulu_hash_prev = zblk;
        }
        ZULU_MAP_HASH_HEAD(zhat, zblk->zulu_hat_blk_vaddr,
            zblk->zulu_hat_blk_size) = zblk;

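        /*
         * Finally, install the tte in the TSB slot for this vaddr and size.
         * Any previous occupant of the slot is simply overwritten; it can
         * be faulted back in later through the hash table.
         */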
        zulu_ctx_tsb_lock_enter(zhat);
        zhat->zulu_tsb[tsb_hash] = zblk->zulu_hat_blk_tte;
        zulu_ctx_tsb_lock_exit(zhat);
}

/*
 * remove a block from a zhat
 */
static void
zulu_hat_remove_map(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
{
        int tsb_hash = ZULU_TSB_HASH(zblk->zulu_hat_blk_vaddr,
            zblk->zulu_hat_blk_size, zhat->zulu_tsb_size);

        TNF_PROBE_2(zulu_hat_remove_map, "zulu_hat", /* CSTYLED */,
            tnf_opaque, vaddr, zblk->zulu_hat_blk_vaddr,
            tnf_opaque, hash, tsb_hash);

        ASSERT(tsb_hash < zhat->zulu_tsb_size);
        ASSERT(mutex_owned(&zhat->lock));

        zulu_shadow_tree_delete(zhat, zblk);

        /*
         * first remove zblk from the hash table
         */
        if (zblk->zulu_hash_prev) {
                zblk->zulu_hash_prev->zulu_hash_next = zblk->zulu_hash_next;
        } else {
                ZULU_MAP_HASH_HEAD(zhat, zblk->zulu_hat_blk_vaddr,
                    zblk->zulu_hat_blk_size) = zblk->zulu_hash_next;
        }
        if (zblk->zulu_hash_next) {
                zblk->zulu_hash_next->zulu_hash_prev = zblk->zulu_hash_prev;
        }
        zblk->zulu_hash_next = NULL;
        zblk->zulu_hash_prev = NULL;

        /*
         * then invalidate the tsb entry, but only if it still belongs
         * to this block
         */
        zulu_ctx_tsb_lock_enter(zhat);
        if (zhat->zulu_tsb[tsb_hash].un.zulu_tte_addr ==
            zblk->zulu_hat_blk_vaddr) {
                zhat->zulu_tsb[tsb_hash].zulu_tte_valid = 0;
        }
        zulu_ctx_tsb_lock_exit(zhat);
}

/*
 * look for a mapping to a given vaddr and page size
 */
static struct zulu_hat_blk *
zulu_lookup_map_bysize(struct zulu_hat *zhat, caddr_t vaddr, int page_sz)
{
        struct          zulu_hat_blk *zblkp;
        uint64_t        ivaddr = (uint64_t)vaddr;
        int             blks_checked = 0;

        ASSERT(mutex_owned(&zhat->lock));

        for (zblkp = ZULU_MAP_HASH_HEAD(zhat, ivaddr, page_sz); zblkp != NULL;
            zblkp = zblkp->zulu_hash_next) {
                uint64_t        size;
                uint64_t        iaddr;

                blks_checked++;

                size = ZULU_HAT_PGSZ(zblkp->zulu_hat_blk_size);
                iaddr = ZULU_VADDR((uint64_t)zblkp->zulu_hat_blk_vaddr);

                if (iaddr <= ivaddr && (iaddr + size) > ivaddr) {
                        int tsb_hash;

                        tsb_hash = ZULU_TSB_HASH(zblkp->zulu_hat_blk_vaddr,
                            zblkp->zulu_hat_blk_size,
                            zhat->zulu_tsb_size);
                        ASSERT(tsb_hash < zhat->zulu_tsb_size);

                        zulu_ctx_tsb_lock_enter(zhat);
                        zhat->zulu_tsb[tsb_hash] = zblkp->zulu_hat_blk_tte;
                        zulu_ctx_tsb_lock_exit(zhat);
                        break;
                }
        }

        TNF_PROBE_3(zulu_hat_lookup_map_bysz, "zulu_hat", /* CSTYLED */,
            tnf_opaque, zblkp, zblkp,
            tnf_int, blks_checked, blks_checked,
            tnf_int, page_sz, page_sz);

        return (zblkp);
}

/*
 * Look up a zblk for a given virtual address.
 */
static struct zulu_hat_blk *
zulu_lookup_map(struct zulu_hat *zhat, caddr_t vaddr)
{
        struct          zulu_hat_blk *zblkp = NULL;

        /*
         * if the hat is using 4M pages, look first for a 4M page
         */
        if (zhat->map4m) {
                zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE4M);
                if (zblkp != NULL) {
                        return (zblkp);
                }
        }
        /*
         * Otherwise look for an 8K page.
         * Note: if the base pagesize gets increased to 64K, remove this test.
         */
        if (zhat->map8k) {
                zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE8K);
                if (zblkp != NULL) {
                        return (zblkp);
                }
        }
        /*
         * Only if the page isn't found in the sizes that match the zulu mmus
         * do we look for the less efficient 64K and 512K page sizes.
         */
        if (zhat->map64k) {
                zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE64K);
                if (zblkp != NULL) {
                        return (zblkp);
                }
        }
        if (zhat->map512k) {
                zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE512K);
        }

        return (zblkp);
}

/*
 * zulu_hat_load: load the translation for a given vaddr
 */
int
zulu_hat_load(struct zulu_hat *zhat, caddr_t vaddr,
                enum seg_rw rw, int *ppg_size)
{
        faultcode_t             as_err;
        struct zulu_hat_blk     *zblkp;
        int                     rval;
        uint64_t                flags_pfn;
        struct zulu_tte         tte;

        TNF_PROBE_2(zulu_hat_load, "zulu_hat", /* CSTYLED */,
            tnf_int, zulu_ctx, zhat->zulu_ctx,
            tnf_opaque, vaddr, vaddr);

        mutex_enter(&zhat->lock);
        ASSERT(zhat->zulu_ctx >= 0);
        /*
         * look in our tsb first
         */
        zulu_ctx_tsb_lock_enter(zhat);
        flags_pfn = zulu_hat_tsb_lookup_tl0(zhat, vaddr);
        zulu_ctx_tsb_lock_exit(zhat);

        if (flags_pfn) {
                uint64_t *p = (uint64_t *)&tte;

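                /*
                 * flags_pfn is the tte's data word.  Copy it into the data
                 * half of the local struct zulu_tte, skipping the tag word,
                 * so the permission and size fields can be picked out below.
                 */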
                p++;                    /* ignore the tag */
                *p = flags_pfn;         /* load the flags */

                zuluvm_load_tte(zhat, vaddr, flags_pfn, tte.zulu_tte_perm,
                    tte.zulu_tte_size);
                if (ppg_size != NULL) {
                        *ppg_size = tte.zulu_tte_size;
                }

                zulu_tsb_hit++;
                mutex_exit(&zhat->lock);
                return (0);
        }

        zulu_tsb_miss++;

        zblkp = zulu_lookup_map(zhat, vaddr);
        if (zblkp) {
                tte = zblkp->zulu_hat_blk_tte;
                tte.zulu_tte_pfn = ZULU_HAT_ADJ_PFN((&tte), vaddr);
                zuluvm_load_tte(zhat, vaddr, tte.zulu_tte_pfn,
                    tte.zulu_tte_perm, tte.zulu_tte_size);
                if (ppg_size != NULL) {
                        *ppg_size = tte.zulu_tte_size;
                }
                mutex_exit(&zhat->lock);
                return (0);
        }

        /*
         * Set a flag indicating that we're processing a fault.
         * See comments in zulu_hat_unload_region.
         */
        zhat->in_fault = 1;
        mutex_exit(&zhat->lock);

        zulu_as_fault++;
        TNF_PROBE_0(calling_as_fault, "zulu_hat", /* CSTYLED */);

        as_err = as_fault((struct hat *)zhat, zhat->zulu_xhat.xhat_as,
            (caddr_t)(ZULU_VADDR((uint64_t)vaddr) & PAGEMASK),
            PAGESIZE, F_INVAL, rw);

        mutex_enter(&zhat->lock);
        zhat->in_fault = 0;
        if (ppg_size != NULL) {
                /*
                 * caller wants to know the page size (used by preload)
                 */
                zblkp = zulu_lookup_map(zhat, vaddr);
                if (zblkp != NULL) {
                        *ppg_size = zblkp->zulu_hat_blk_size;
                } else {
                        *ppg_size = -1;
                }
        }
        mutex_exit(&zhat->lock);

        TNF_PROBE_1(as_fault_returned, "zulu_hat", /* CSTYLED */,
            tnf_int, as_err, as_err);

        if (as_err != 0) {
                printf("as_fault returned %d\n", as_err);
                rval = as_err;
        } else if (zhat->freed) {
                rval = -1;
        } else {
                rval = 0;
        }

        return (rval);
}

static struct xhat *
zulu_hat_alloc(void *arg)
{
        struct zulu_hat *zhat = kmem_zalloc(sizeof (struct zulu_hat), KM_SLEEP);

        (void) arg;

        zulu_hat_ctx_alloc(zhat);

        mutex_init(&zhat->lock, NULL, MUTEX_DEFAULT, NULL);

        zhat->zulu_tsb = kmem_zalloc(ZULU_TSB_SZ, KM_SLEEP);
        zhat->zulu_tsb_size = ZULU_TSB_NUM;
        zhat->hash_tbl = kmem_zalloc(ZULU_HASH_TBL_SZ, KM_SLEEP);
        avl_create(&zhat->shadow_tree, zulu_shadow_tree_compare,
            sizeof (zhat->shadow_tree), ZULU_SHADOW_BLK_LINK_OFFSET);
        /*
         * The zulu hat has a few opaque data structs embedded in it.
         * This tag makes finding our data easier with a debugger.
         */
        zhat->magic = 0x42;

        zhat->freed = 0;
        TNF_PROBE_1(zulu_hat_alloc, "zulu_hat", /* CSTYLED */,
            tnf_int, zulu_ctx, zhat->zulu_ctx);
        return ((struct xhat *)zhat);
}

static void
zulu_hat_free(struct xhat *xhat)
{
        struct zulu_hat *zhat = (struct zulu_hat *)xhat;

        TNF_PROBE_1(zulu_hat_free, "zulu_hat", /* CSTYLED */,
            tnf_int, zulu_ctx, zhat->zulu_ctx);

        zulu_shadow_tree_destroy(zhat);
        kmem_free(zhat->hash_tbl, ZULU_HASH_TBL_SZ);
        kmem_free(zhat->zulu_tsb, ZULU_TSB_SZ);
        mutex_destroy(&zhat->lock);
        kmem_free(xhat, sizeof (struct zulu_hat));
}

static void
zulu_hat_free_start(struct xhat *xhat)
{
        struct zulu_hat *zhat = (struct zulu_hat *)xhat;

        TNF_PROBE_1(zulu_hat_free_start, "zulu_hat", /* CSTYLED */,
            tnf_int, zulu_ctx, zhat->zulu_ctx);
        (void) xhat;
}

/*
 * zulu_hat_memload: This is the callback where the vm system gives us our
 * translations
 */
static void
zulu_do_hat_memload(struct xhat *xhat, caddr_t vaddr, struct page *page,
    uint_t attr, uint_t flags, int use_pszc)
{
        void *blk;
        struct zulu_hat *zhat = (struct zulu_hat *)xhat;
        struct zulu_hat_blk *zblk;
        pfn_t pfn;

        TNF_PROBE_4(zulu_hat_memload, "zulu_hat", /* CSTYLED */,
            tnf_int, zulu_ctx, zhat->zulu_ctx,
            tnf_opaque, vaddr, vaddr, tnf_opaque, attr, attr,
            tnf_opaque, flags, flags);

        /*
         * Keep track of the highest address that this zhat has had
         * a mapping for.
         * We use this in unload to avoid searching for regions that
         * we've never seen.
         *
         * This is particularly useful for avoiding repeated searches for
         * the process's mappings to the zulu hardware. These mappings
         * are explicitly unloaded at each graphics context switch.
         *
         * This takes advantage of the fact that the device addresses
         * are always above the heap where most DMA data is stored.
         */
        if (vaddr > zhat->vaddr_max) {
                zhat->vaddr_max = vaddr;
        }

        pfn = xhat_insert_xhatblk(page, xhat, &blk);
        zblk = (struct zulu_hat_blk *)blk;
        zblk->zulu_hat_blk_vaddr = (uintptr_t)vaddr;
        zblk->zulu_hat_blk_pfn = (uint_t)pfn;
        /*
         * The perm bit is actually in the tte which gets copied to the TSB
         */
        zblk->zulu_hat_blk_perm = (attr & PROT_WRITE) ? 1 : 0;
        zblk->zulu_hat_blk_size = use_pszc ? page->p_szc : 0;
        zblk->zulu_hat_blk_valid = 1;

        switch (zblk->zulu_hat_blk_size) {
        case    ZULU_TTE8K:
                zhat->map8k = 1;
                break;
        case    ZULU_TTE64K:
                zhat->map64k = 1;
                break;
        case    ZULU_TTE512K:
                zhat->map512k = 1;
                break;
        case    ZULU_TTE4M:
                zhat->map4m = 1;
                break;
        default:
                panic("zulu_hat illegal page size\n");
        }

        mutex_enter(&zhat->lock);

        zulu_hat_insert_map(zhat, zblk);
        if (!zhat->freed) {
                zuluvm_load_tte(zhat, vaddr, zblk->zulu_hat_blk_pfn,
                    zblk->zulu_hat_blk_perm, zblk->zulu_hat_blk_size);
        }
        zhat->fault_ivaddr_last =
            ZULU_VADDR((uint64_t)zblk->zulu_hat_blk_vaddr);

        mutex_exit(&zhat->lock);
}

static void
zulu_hat_memload(struct xhat *xhat, caddr_t vaddr, struct page *page,
    uint_t attr, uint_t flags)
{
        zulu_do_hat_memload(xhat, vaddr, page, attr, flags, 0);
}

static void
zulu_hat_devload(struct xhat *xhat, caddr_t vaddr, size_t size, pfn_t pfn,
        uint_t attr, int flags)
{
        struct page *pp = page_numtopp_nolock(pfn);
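
        /* route device loads through the common memload path */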
        (void) size;
        zulu_do_hat_memload(xhat, vaddr, pp, attr, (uint_t)flags, 1);
}

static void
zulu_hat_memload_array(struct xhat *xhat, caddr_t addr, size_t len,
    struct page **gen_pps, uint_t attr, uint_t flags)
{
        struct zulu_hat *zhat = (struct zulu_hat *)xhat;

        TNF_PROBE_3(zulu_hat_memload_array, "zulu_hat", /* CSTYLED */,
            tnf_int, zulu_ctx, zhat->zulu_ctx,
            tnf_opaque, addr, addr,
            tnf_opaque, len, len);

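        /*
         * Walk the page array one large page at a time, advancing the
         * address and the page pointer by the size of each page loaded.
         */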
        for (; len > 0; len -= ZULU_HAT_PGSZ((*gen_pps)->p_szc),
            gen_pps += ZULU_HAT_NUM_PGS((*gen_pps)->p_szc)) {
                zulu_do_hat_memload(xhat, addr, *gen_pps, attr, flags, 1);

                addr += ZULU_HAT_PGSZ((*gen_pps)->p_szc);
        }
}

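/*
 * Blocks removed from a zhat while its lock is held are collected on a
 * private free list and released by free_zblks() after the lock has been
 * dropped.
 */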
static void
free_zblks(struct zulu_hat_blk *free_list)
{
        struct zulu_hat_blk *zblkp;
        struct zulu_hat_blk *next;

        for (zblkp = free_list; zblkp != NULL; zblkp = next) {
                next = zblkp->zulu_hash_next;
                (void) xhat_delete_xhatblk((struct xhat_hme_blk *)zblkp, 0);
        }
}

static void
add_to_free_list(struct zulu_hat_blk **pfree_list, struct zulu_hat_blk *zblk)
{
        zblk->zulu_hash_next = *pfree_list;
        *pfree_list = zblk;
}

static void
zulu_hat_unload_region(struct zulu_hat *zhat, uint64_t ivaddr, size_t size,
                struct zulu_shadow_blk *sblk, struct zulu_hat_blk **pfree_list)
{
        uint64_t        end = ivaddr + size;
        int             found = 0;

        TNF_PROBE_2(zulu_hat_unload_region, "zulu_hat", /* CSTYLED */,
            tnf_opaque, vaddr, ivaddr, tnf_opaque, size, size);

        /*
         * check the address range against the low and high water marks for
         * mappings in this sblk
         */
        if (ivaddr < sblk->min_addr) {
                ivaddr = sblk->min_addr;
                TNF_PROBE_1(zulu_hat_unload_skip, "zulu_hat", /* CSTYLED */,
                    tnf_opaque, ivaddr, ivaddr);
        }
        if (end > sblk->max_addr) {
                end = sblk->max_addr;
                TNF_PROBE_1(zulu_hat_unload_reg_skip, "zulu_hat", /* CSTYLED */,
                    tnf_opaque, end, end);
        }
        /*
         * REMIND: It's not safe to touch the sblk after we enter this loop
         * because it may get deleted.
         */

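        /*
         * Walk the region; for each address covered by a mapping, remove
         * the zblk, demap the page from the zulu MMU and put the zblk on
         * the caller's free list.
         */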
        while (ivaddr < end) {
                uint64_t iaddr;
                size_t  pg_sz;
                struct zulu_hat_blk *zblkp;

                zblkp = zulu_lookup_map(zhat, (caddr_t)ivaddr);
                if (zblkp == NULL) {
                        ivaddr += PAGESIZE;
                        continue;
                }

                iaddr = ZULU_VADDR((uint64_t)zblkp->zulu_hat_blk_vaddr);
                pg_sz = ZULU_HAT_PGSZ(zblkp->zulu_hat_blk_size);

                found++;

                zulu_hat_remove_map(zhat, zblkp);
                /*
                 * Skip the page demap if as_free has already been entered;
                 * zuluvm has already demapped the whole context.
                 */
                if (!zhat->freed) {
                        if ((zhat->in_fault) &&
                            (iaddr == zhat->fault_ivaddr_last)) {
                                /*
                                 * We're being called from within as_fault to
                                 * unload the last translation we loaded.
                                 *
                                 * This is probably due to watchpoint handling.
                                 * Delay the demap for a millisecond
                                 * to allow zulu to make some progress.
                                 */
                                drv_usecwait(1000);
                                zhat->fault_ivaddr_last = 0;
                        }
                        zulu_hat_demap_page(zhat, (caddr_t)iaddr,
                            zblkp->zulu_hat_blk_size);
                }

                add_to_free_list(pfree_list, zblkp);

                if ((iaddr + pg_sz) >= end) {
                        break;
                }

                ivaddr += pg_sz;
        }
        TNF_PROBE_1(zulu_hat_unload_region_done, "zulu_hat", /* CSTYLED */,
            tnf_opaque, found, found);
}

static void
zulu_hat_unload(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
{
        struct zulu_hat *zhat = (struct zulu_hat *)xhat;
        uint64_t        ivaddr;
        uint64_t        end;
        int             found = 0;
        struct zulu_hat_blk *free_list = NULL;

        (void) flags;

        TNF_PROBE_4(zulu_hat_unload, "zulu_hat", /* CSTYLED */,
            tnf_int, zulu_ctx, zhat->zulu_ctx,
            tnf_opaque, vaddr, vaddr,
            tnf_opaque, vaddr_max, zhat->vaddr_max,
            tnf_opaque, size, size);

        mutex_enter(&zhat->lock);

        /*
         * The following test prevents us from searching for the user's
         * mappings to the zulu device registers. Those mappings get unloaded
         * every time the graphics context switches away from this context.
         *
         * Since the heap is located at smaller virtual addresses than the
         * registers, this simple test avoids quite a bit of useless work.
         */
        if (vaddr > zhat->vaddr_max) {
                /*
                 * all existing mappings have lower addresses than vaddr;
                 * no need to search further.
                 */
                mutex_exit(&zhat->lock);
                return;
        }

        ivaddr = (uint64_t)vaddr;
        end = ivaddr + size;

        do {
                struct zulu_shadow_blk *sblk;

                sblk = zulu_shadow_tree_lookup(zhat, ivaddr, NULL);
                if (sblk != NULL) {
                        uint64_t        sblk_end;
                        size_t          region_size;

                        found++;

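                        /*
                         * Limit this pass to the portion of the request
                         * that falls within the current 4MB shadow block.
                         */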
                        sblk_end = (ivaddr + ZULU_SHADOW_BLK_RANGE) &
                            ZULU_SHADOW_BLK_MASK;

                        if (sblk_end < end) {
                                region_size = sblk_end - ivaddr;
                        } else {
                                region_size = end - ivaddr;
                        }
                        zulu_hat_unload_region(zhat, ivaddr, region_size, sblk,
                            &free_list);
                }
                ivaddr += ZULU_SHADOW_BLK_RANGE;
        } while (ivaddr < end);

        mutex_exit(&zhat->lock);

        free_zblks(free_list);

        TNF_PROBE_1(zulu_hat_unload_done, "zulu_hat", /* CSTYLED */,
            tnf_int, found, found);
}

static void
zulu_hat_unload_callback(struct xhat *xhat, caddr_t vaddr, size_t size,
        uint_t flags, hat_callback_t *pcb)
{
        (void) size;
        (void) pcb;
        zulu_hat_unload(xhat, vaddr, size, flags);
}


/*
 * unload one page
 */
static int
zulu_hat_pageunload(struct xhat *xhat, struct page *pp, uint_t flags,
    void *xblk)
{
        struct zulu_hat_blk *zblk = (struct zulu_hat_blk *)xblk;
        struct zulu_hat *zhat = (struct zulu_hat *)xhat;
        int     do_delete;

        (void) pp;
        (void) flags;

        TNF_PROBE_3(zulu_hat_pageunload, "zulu_hat", /* CSTYLED */,
            tnf_int, zulu_ctx, zhat->zulu_ctx,
            tnf_opaque, vaddr, zblk->zulu_hat_blk_vaddr,
            tnf_int, pg_size, zblk->zulu_hat_blk_size);

        mutex_enter(&zhat->lock);
        if (zblk->zulu_shadow_blk != NULL) {

                do_delete = 1;

                zulu_hat_remove_map(zhat, zblk);

                /*
                 * now that the entry is removed from the TSB, remove the
                 * translation from the zulu hardware.
                 *
                 * Skip the demap if this as is in the process of being freed.
                 * The zuluvm as callback has demapped the whole context.
                 */
                if (!zhat->freed) {
                        zulu_hat_demap_page(zhat,
                            (caddr_t)(uintptr_t)(zblk->zulu_hat_blk_page <<
                            ZULU_HAT_BP_SHIFT),
                            zblk->zulu_hat_blk_size);
                }
        } else {
                /*
                 * This block has already been removed from the zulu_hat;
                 * it's on a free list waiting for our thread to release
                 * a mutex so it can be freed.
                 */
                do_delete = 0;

                TNF_PROBE_0(zulu_hat_pageunload_skip, "zulu_hat",
                    /* CSTYLED */);
        }
        mutex_exit(&zhat->lock);

        if (do_delete) {
                (void) xhat_delete_xhatblk(xblk, 1);
        }

        return (0);
}

static void
zulu_hat_unshare(struct xhat *xhat, caddr_t vaddr, size_t size)
{
        TNF_PROBE_0(zulu_hat_unshare, "zulu_hat", /* CSTYLED */);

        zulu_hat_unload(xhat, vaddr, size, 0);
}

/*
 * Functions to manage changes in protections for mappings.
 *
 * These are rarely called in normal operation, so for now we just unload
 * the region.
 * If the mapping is still needed, it will fault in later with the new
 * attributes.
 */
typedef enum {
        ZULU_HAT_CHGATTR,
        ZULU_HAT_SETATTR,
        ZULU_HAT_CLRATTR
} zulu_hat_prot_op;

static void
zulu_hat_update_attr(struct xhat *xhat, caddr_t vaddr, size_t size,
        uint_t flags, zulu_hat_prot_op op)
{
        struct zulu_hat *zhat = (struct zulu_hat *)xhat;

        TNF_PROBE_5(zulu_hat_changeprot, "zulu_hat", /* CSTYLED */,
            tnf_int, ctx, zhat->zulu_ctx,
            tnf_opaque, vaddr, vaddr, tnf_opaque, size, size,
            tnf_uint, flags, flags, tnf_int, op, op);

        zulu_hat_unload(xhat, vaddr, size, 0);
}

static void
zulu_hat_chgprot(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
{
        struct zulu_hat *zhat = (struct zulu_hat *)xhat;
#ifdef DEBUG
        printf("zulu_hat_chgprot: ctx: %d addr: %lx, size: %lx flags: %x\n",
            zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
#endif
        zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_CHGATTR);
}


static void
zulu_hat_setattr(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
{
        struct zulu_hat *zhat = (struct zulu_hat *)xhat;
#ifdef DEBUG
        printf("zulu_hat_setattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
            zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
#endif
        zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_SETATTR);
}

static void
zulu_hat_clrattr(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
{
        struct zulu_hat *zhat = (struct zulu_hat *)xhat;
#ifdef DEBUG
        printf("zulu_hat_clrattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
            zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
#endif
        zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_CLRATTR);
}

static void
zulu_hat_chgattr(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
{
        struct zulu_hat *zhat = (struct zulu_hat *)xhat;
        TNF_PROBE_3(zulu_hat_chgattr, "zulu_hat", /* CSTYLED */,
            tnf_int, ctx, zhat->zulu_ctx,
            tnf_opaque, vaddr, vaddr,
            tnf_opaque, flags, flags);
#ifdef DEBUG
        printf("zulu_hat_chgattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
            zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
#endif
        zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_CHGATTR);
}


struct xhat_ops zulu_hat_ops = {
        zulu_hat_alloc,         /* xhat_alloc */
        zulu_hat_free,          /* xhat_free */
        zulu_hat_free_start,    /* xhat_free_start */
        NULL,                   /* xhat_free_end */
        NULL,                   /* xhat_dup */
        zulu_hat_memload,       /* xhat_memload */
        zulu_hat_memload_array, /* xhat_memload_array */
        zulu_hat_devload,       /* xhat_devload */
        zulu_hat_unload,        /* xhat_unload */
        zulu_hat_unload_callback, /* xhat_unload_callback */
        zulu_hat_setattr,       /* xhat_setattr */
        zulu_hat_clrattr,       /* xhat_clrattr */
        zulu_hat_chgattr,       /* xhat_chgattr */
        zulu_hat_unshare,       /* xhat_unshare */
        zulu_hat_chgprot,       /* xhat_chgprot */
        zulu_hat_pageunload,    /* xhat_pageunload */
};

xblk_cache_t zulu_xblk_cache = {
    NULL,
    NULL,
    NULL,
    xhat_xblkcache_reclaim
};

xhat_provider_t zulu_hat_provider = {
        XHAT_PROVIDER_VERSION,
        0,
        NULL,
        NULL,
        "zulu_hat_provider",
        &zulu_xblk_cache,
        &zulu_hat_ops,
        sizeof (struct zulu_hat_blk) + sizeof (struct xhat_hme_blk)
};

/*
 * The following functions are the entry points that zuluvm uses.
 */

/*
 * initialize this module. Called from zuluvm's _init function
 */
int
zulu_hat_init()
{
        int     c;
        int     rval;
        mutex_init(&zulu_ctx_lock, NULL, MUTEX_DEFAULT, NULL);

        for (c = 0; c < ZULU_HAT_MAX_CTX; c++) {
                ZULU_CTX_LOCK_INIT(c);
        }
        zulu_ctx_search_start = 0;
        rval = xhat_provider_register(&zulu_hat_provider);
        if (rval != 0) {
                mutex_destroy(&zulu_ctx_lock);
        }
        return (rval);
}

/*
 * un-initialize this module. Called from zuluvm's _fini function
 */
int
zulu_hat_destroy()
{
        if (xhat_provider_unregister(&zulu_hat_provider) != 0) {
                return (-1);
        }
        mutex_destroy(&zulu_ctx_lock);
        return (0);
}

int
zulu_hat_attach(void *arg)
{
        (void) arg;
        return (0);
}

int
zulu_hat_detach(void *arg)
{
        (void) arg;
        return (0);
}

/*
 * create a zulu hat for this address space.
 */
struct zulu_hat *
zulu_hat_proc_attach(struct as *as, void *zdev)
{
        struct zulu_hat *zhat;
        int             xhat_rval;

        xhat_rval = xhat_attach_xhat(&zulu_hat_provider, as,
            (struct xhat **)&zhat, NULL);
        if ((xhat_rval == 0) && (zhat != NULL)) {
                mutex_enter(&zhat->lock);
                ZULU_HAT2AS(zhat) = as;
                zhat->zdev = zdev;
                mutex_exit(&zhat->lock);
        }

        TNF_PROBE_3(zulu_hat_proc_attach, "zulu_hat", /* CSTYLED */,
            tnf_int, xhat_rval, xhat_rval, tnf_opaque, as, as,
            tnf_opaque, zhat, zhat);

        return (zhat);
}

void
zulu_hat_proc_detach(struct zulu_hat *zhat)
{
        struct  as *as = ZULU_HAT2AS(zhat);

        zulu_hat_ctx_free(zhat);

        (void) xhat_detach_xhat(&zulu_hat_provider, ZULU_HAT2AS(zhat));

        TNF_PROBE_1(zulu_hat_proc_detach, "zulu_hat", /* CSTYLED */,
            tnf_opaque, as, as);
}

/*
 * zulu_hat_terminate
 *
 * Disables any further TLB miss processing for this hat.
 * Called by zuluvm's as_free callback. The primary purpose of this
 * function is to cause any pending zulu DMA to abort quickly.
 */
void
zulu_hat_terminate(struct zulu_hat *zhat)
{
        int     ctx = zhat->zulu_ctx;

        TNF_PROBE_1(zulu_hat_terminate, "zulu_hat", /* CSTYLED */,
            tnf_int, ctx, ctx);

        mutex_enter(&zhat->lock);

        zhat->freed = 1;

        zulu_ctx_tsb_lock_enter(zhat);
        /*
         * zap the tsb
         */
        bzero(zhat->zulu_tsb, ZULU_TSB_SZ);
        zulu_ctx_tsb_lock_exit(zhat);

        zulu_hat_demap_ctx(zhat->zdev, zhat->zulu_ctx);

        mutex_exit(&zhat->lock);

        TNF_PROBE_0(zulu_hat_terminate_done, "zulu_hat", /* CSTYLED */);
}