Print this page
6345 remove xhat support

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/sfmmu/vm/hat_sfmmu.h
          +++ new/usr/src/uts/sfmmu/vm/hat_sfmmu.h
↓ open down ↓ 651 lines elided ↑ open up ↑
 652  652   * find the shared hme entry during trap handling and therefore there is no
 653  653   * corresponding event to initiate ttecnt accounting. Currently, as shared
 654  654   * hmes are only used for text segments, when joining a region we assume the
 655  655   * worst case and add the number of ttes required to map the entire region
 656  656   * to the ttecnt corresponding to the region pagesize. However, if the region
 657  657   * has a 4M pagesize, and memory is low, the allocation of 4M pages may fail;
 658  658   * then 8K pages will be allocated instead and the first TSB which stores 8K
 659  659   * mappings will potentially be undersized. To compensate for the potential
 660  660   * underaccounting in this case we always add 1/4 of the region size to the 8K
 661  661   * ttecnt.
 662      - *
 663      - * Note that sfmmu_xhat_provider MUST be the first element.
 664  662   */
 665  663  
 666  664  struct hat {
 667      -        void            *sfmmu_xhat_provider;   /* NULL for CPU hat */
 668  665          cpuset_t        sfmmu_cpusran;  /* cpu bit mask for efficient xcalls */
 669  666          struct  as      *sfmmu_as;      /* as this hat provides mapping for */
 670  667          /* per pgsz private ttecnt + shme rgns ttecnt for rgns not in SCD */
 671  668          ulong_t         sfmmu_ttecnt[MMU_PAGE_SIZES];
 672  669          /* shme rgns ttecnt for rgns in SCD */
 673  670          ulong_t         sfmmu_scdrttecnt[MMU_PAGE_SIZES];
 674  671          /* est. ism ttes that are NOT in a SCD */
 675  672          ulong_t         sfmmu_ismttecnt[MMU_PAGE_SIZES];
 676  673          /* ttecnt for isms that are in a SCD */
 677  674          ulong_t         sfmmu_scdismttecnt[MMU_PAGE_SIZES];
↓ open down ↓ 556 lines elided ↑ open up ↑
1234 1231   * The hmeblk structure contains 2 tte reference counters used to determine if
1235 1232   * it is ok to free up the hmeblk.  Both counters have to be zero in order
1236 1233   * to be able to free up hmeblk.  They are protected by cas.
1237 1234   * hblk_hmecnt is the number of hments present on pp mapping lists.
1238 1235   * hblk_vcnt reflects number of valid ttes in hmeblk.
1239 1236   *
1240 1237   * The hmeblk now also has per tte lock cnts.  This is required because
1241 1238   * the counts can be high and there are not enough bits in the tte. When
1242 1239   * physio is fixed to not lock the translations we should be able to move
1243 1240   * the lock cnt back to the tte.  See bug id 1198554.
1244      - *
1245      - * Note that xhat_hme_blk's layout follows this structure: hme_blk_misc
1246      - * and sf_hment are at the same offsets in both structures. Whenever
1247      - * hme_blk is changed, xhat_hme_blk may need to be updated as well.
1248 1241   */
1249 1242  
1250 1243  struct hme_blk_misc {
1251      -        uint_t  notused:25;
     1244 +        uint_t  notused:26;
1252 1245          uint_t  shared_bit:1;   /* set for SRD shared hmeblk */
1253      -        uint_t  xhat_bit:1;     /* set for an xhat hme_blk */
1254 1246          uint_t  shadow_bit:1;   /* set for a shadow hme_blk */
1255 1247          uint_t  nucleus_bit:1;  /* set for a nucleus hme_blk */
1256 1248          uint_t  ttesize:3;      /* contains ttesz of hmeblk */
1257 1249  };
1258 1250  
1259 1251  struct hme_blk {
1260 1252          volatile uint64_t hblk_nextpa;  /* physical address for hash list */
1261 1253  
1262 1254          hmeblk_tag      hblk_tag;       /* tag used to obtain an hmeblk match */
1263 1255  
↓ open down ↓ 19 lines elided ↑ open up ↑
1283 1275  #ifdef  HBLK_TRACE
1284 1276          kmutex_t        hblk_audit_lock;        /* lock to protect index */
1285 1277          uint_t          hblk_audit_index;       /* index into audit_cache */
1286 1278          struct  hblk_lockcnt_audit hblk_audit_cache[HBLK_AUDIT_CACHE_SIZE];
 1287 1279  #endif  /* HBLK_TRACE */
1288 1280  
1289 1281          struct sf_hment hblk_hme[1];    /* hment array */
1290 1282  };
1291 1283  
1292 1284  #define hblk_shared     hblk_misc.shared_bit
1293      -#define hblk_xhat_bit   hblk_misc.xhat_bit
1294 1285  #define hblk_shw_bit    hblk_misc.shadow_bit
1295 1286  #define hblk_nuc_bit    hblk_misc.nucleus_bit
1296 1287  #define hblk_ttesz      hblk_misc.ttesize
1297 1288  #define hblk_hmecnt     hblk_un.hblk_counts.hblk_hmecount
1298 1289  #define hblk_vcnt       hblk_un.hblk_counts.hblk_validcnt
1299 1290  #define hblk_shw_mask   hblk_un.hblk_shadow_mask
1300 1291  
1301 1292  #define MAX_HBLK_LCKCNT 0xFFFFFFFF
1302 1293  #define HMEBLK_ALIGN    0x8             /* hmeblk has to be double aligned */
1303 1294  
↓ open down ↓ 1004 lines elided ↑ open up ↑
2308 2299  extern void     sfmmu_tsb_segkmem_free(vmem_t *, void *, size_t);
2309 2300  extern void     sfmmu_reprog_pgsz_arr(sfmmu_t *, uint8_t *);
2310 2301  
2311 2302  extern void     hat_kern_setup(void);
2312 2303  extern int      hat_page_relocate(page_t **, page_t **, spgcnt_t *);
2313 2304  extern int      sfmmu_get_ppvcolor(struct page *);
2314 2305  extern int      sfmmu_get_addrvcolor(caddr_t);
2315 2306  extern int      sfmmu_hat_lock_held(sfmmu_t *);
2316 2307  extern int      sfmmu_alloc_ctx(sfmmu_t *, int, struct cpu *, int);
2317 2308  
2318      -/*
2319      - * Functions exported to xhat_sfmmu.c
2320      - */
2321 2309  extern kmutex_t *sfmmu_mlist_enter(page_t *);
2322 2310  extern void     sfmmu_mlist_exit(kmutex_t *);
2323 2311  extern int      sfmmu_mlist_held(struct page *);
2324 2312  extern struct hme_blk *sfmmu_hmetohblk(struct sf_hment *);
2325 2313  
2326 2314  /*
2327 2315   * MMU-specific functions optionally imported from the CPU module
2328 2316   */
2329 2317  #pragma weak mmu_init_scd
2330 2318  #pragma weak mmu_large_pages_disabled
↓ open down ↓ 258 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX