/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>        /* lddphys() */
#include <sys/iommutsb.h>
#include <px_obj.h>
#include <sys/hotplug/pci/pcie_hp.h>
#include "px_regs.h"
#include "oberon_regs.h"
#include "px_csr.h"
#include "px_lib4u.h"
#include "px_err.h"

/*
 * Registers that need to be saved and restored during suspend/resume.
 */

/*
 * Registers in the PEC Module.
 * LPU_RESET should be set to 0ull during resume.
 *
 * Each entry of this array is a (reg, chip) pair: PX_CHIP_UNIDENTIFIED
 * marks registers common to all chips, PX_CHIP_FIRE marks Fire-only
 * registers, and PX_CHIP_OBERON marks Oberon-only registers.
 */
static struct px_pec_regs {
        uint64_t reg;
        uint64_t chip;
} pec_config_state_regs[] = {
        {PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
        {ILU_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
        {ILU_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
        {TLU_CONTROL, PX_CHIP_UNIDENTIFIED},
        {TLU_OTHER_EVENT_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
        {TLU_OTHER_EVENT_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
        {TLU_DEVICE_CONTROL, PX_CHIP_UNIDENTIFIED},
        {TLU_LINK_CONTROL, PX_CHIP_UNIDENTIFIED},
        {TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
        {TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
        {TLU_CORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
        {TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
        {DLU_LINK_LAYER_CONFIG, PX_CHIP_OBERON},
        {DLU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_OBERON},
        {DLU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_OBERON},
        {LPU_LINK_LAYER_INTERRUPT_MASK, PX_CHIP_FIRE},
        {LPU_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
        {LPU_RECEIVE_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
        {LPU_TRANSMIT_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
        {LPU_GIGABLAZE_GLUE_INTERRUPT_MASK, PX_CHIP_FIRE},
        {LPU_LTSSM_INTERRUPT_MASK, PX_CHIP_FIRE},
        {LPU_RESET, PX_CHIP_FIRE},
        {LPU_DEBUG_CONFIG, PX_CHIP_FIRE},
        {LPU_INTERRUPT_MASK, PX_CHIP_FIRE},
        {LPU_LINK_LAYER_CONFIG, PX_CHIP_FIRE},
        {LPU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_FIRE},
        {LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, PX_CHIP_FIRE},
        {LPU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_FIRE},
        {LPU_REPLAY_BUFFER_MAX_ADDRESS, PX_CHIP_FIRE},
        {LPU_TXLINK_RETRY_FIFO_POINTER, PX_CHIP_FIRE},
        {LPU_LTSSM_CONFIG2, PX_CHIP_FIRE},
        {LPU_LTSSM_CONFIG3, PX_CHIP_FIRE},
        {LPU_LTSSM_CONFIG4, PX_CHIP_FIRE},
        {LPU_LTSSM_CONFIG5, PX_CHIP_FIRE},
        {DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
        {DMC_DEBUG_SELECT_FOR_PORT_A, PX_CHIP_UNIDENTIFIED},
        {DMC_DEBUG_SELECT_FOR_PORT_B, PX_CHIP_UNIDENTIFIED}
};

#define PEC_KEYS        \
        ((sizeof (pec_config_state_regs))/sizeof (struct px_pec_regs))

#define PEC_SIZE        (PEC_KEYS * sizeof (uint64_t))
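
/*
 * For reference, a hedged sketch (illustrative names, not the driver's
 * actual suspend routine) of how this table is meant to be consumed:
 * walk all PEC_KEYS entries and save only the registers whose chip tag
 * matches the current chip, or whose tag is PX_CHIP_UNIDENTIFIED and is
 * therefore common to all chips, e.g.
 *
 *	uint64_t buf[PEC_KEYS];		(a buffer of PEC_SIZE bytes)
 *	int i, j;
 *
 *	for (i = 0, j = 0; i < PEC_KEYS; i++) {
 *		if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
 *		    (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED))
 *			buf[j++] = CSR_XR(csr_base,
 *			    pec_config_state_regs[i].reg);
 *	}
 */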
  98 
  99 /*
 100  * Registers for the MMU module.
 101  * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
 102  */
 103 static uint64_t mmu_config_state_regs[] = {
 104         MMU_TSB_CONTROL,
 105         MMU_CONTROL_AND_STATUS,
 106         MMU_ERROR_LOG_ENABLE,
 107         MMU_INTERRUPT_ENABLE
 108 };
 109 #define MMU_SIZE (sizeof (mmu_config_state_regs))
 110 #define MMU_KEYS (MMU_SIZE / sizeof (uint64_t))
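
/*
 * The flat register lists here and below (MMU, IB, JBC, UBC, and the
 * MSIQ "other" list) all share the same save/restore shape; a hedged
 * sketch, with an illustrative buffer name:
 *
 *	for (i = 0; i < MMU_KEYS; i++)
 *		mmu_state[i] = CSR_XR(csr_base, mmu_config_state_regs[i]);
 */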

/*
 * Registers for the IB Module.
 */
static uint64_t ib_config_state_regs[] = {
        IMU_ERROR_LOG_ENABLE,
        IMU_INTERRUPT_ENABLE
};
#define IB_SIZE (sizeof (ib_config_state_regs))
#define IB_KEYS (IB_SIZE / sizeof (uint64_t))
#define IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))

/*
 * Registers for the JBC module.
 * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t jbc_config_state_regs[] = {
        JBUS_PARITY_CONTROL,
        JBC_FATAL_RESET_ENABLE,
        JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
        JBC_ERROR_LOG_ENABLE,
        JBC_INTERRUPT_ENABLE
};
#define JBC_SIZE (sizeof (jbc_config_state_regs))
#define JBC_KEYS (JBC_SIZE / sizeof (uint64_t))

/*
 * Registers for the UBC module.
 * UBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t ubc_config_state_regs[] = {
        UBC_ERROR_LOG_ENABLE,
        UBC_INTERRUPT_ENABLE
};
#define UBC_SIZE (sizeof (ubc_config_state_regs))
#define UBC_KEYS (UBC_SIZE / sizeof (uint64_t))

static uint64_t msiq_config_other_regs[] = {
        ERR_COR_MAPPING,
        ERR_NONFATAL_MAPPING,
        ERR_FATAL_MAPPING,
        PM_PME_MAPPING,
        PME_TO_ACK_MAPPING,
        MSI_32_BIT_ADDRESS,
        MSI_64_BIT_ADDRESS
};
#define MSIQ_OTHER_SIZE (sizeof (msiq_config_other_regs))
#define MSIQ_OTHER_KEYS (MSIQ_OTHER_SIZE / sizeof (uint64_t))

#define MSIQ_STATE_SIZE         (EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
#define MSIQ_MAPPING_SIZE       (MSI_MAPPING_ENTRIES * sizeof (uint64_t))

/* OPL tuning variables for the link-unstable issue */
int wait_perst = 5000000;       /* step 9, default: 5s */
int wait_enable_port = 30000;   /* step 11, default: 30ms */
int link_retry_count = 2;       /* step 11, default: 2 */
int link_status_check = 400000; /* step 11, default: 400ms */
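
/*
 * The values above are microsecond counts (e.g. 5000000 == 5 s).  A
 * hedged sketch of the retry-loop shape they suggest, with illustrative
 * names (link_is_up() is hypothetical, not a driver function):
 *
 *	for (retry = 0; retry < link_retry_count; retry++) {
 *		drv_usecwait(wait_enable_port);
 *		if (link_is_up())
 *			break;
 *	}
 */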

static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);
static void jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
static void ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);

extern int px_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE];
extern int px_replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE];

/*
 * Initialize the bus, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
        switch (PX_CHIP_TYPE(pxu_p)) {
        case PX_CHIP_OBERON:
                ubc_init(xbc_csr_base, pxu_p);
                break;
        case PX_CHIP_FIRE:
                jbc_init(xbc_csr_base, pxu_p);
                break;
        default:
                DBG(DBG_CB, NULL, "hvio_cb_init - unknown chip type: 0x%x\n",
                    PX_CHIP_TYPE(pxu_p));
                break;
        }
}

/*
 * Initialize the JBC module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
        uint64_t val;

        /* Check if we need to enable inverted parity */
        val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
        CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
        DBG(DBG_CB, NULL, "jbc_init, JBUS_PARITY_CONTROL: 0x%llx\n",
            CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));

        val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
            (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
            (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
            (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
            (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
            (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
            (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
            (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
        CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
        DBG(DBG_CB, NULL, "jbc_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
            CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));

        /*
         * Enable merge, jbc and dmc interrupts.
         */
        CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
        DBG(DBG_CB, NULL,
            "jbc_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
            CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

        /*
         * CSR_V JBC's interrupt regs (log, enable, status, clear)
         */
        DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
            CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));

        DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
            CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));

        DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
            CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS));

        DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
            CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the UBC module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
        /*
         * Enable Uranus bus error log bits.
         */
        CSR_XS(xbc_csr_base, UBC_ERROR_LOG_ENABLE, -1ull);
        DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
            CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

        /*
         * Clear Uranus bus errors.
         */
        CSR_XS(xbc_csr_base, UBC_ERROR_STATUS_CLEAR, -1ull);
        DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
            CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));

        /*
         * CSR_V UBC's interrupt regs (log, enable, status, clear)
         */
        DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
            CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

        DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_ENABLE: 0x%llx\n",
            CSR_XR(xbc_csr_base, UBC_INTERRUPT_ENABLE));

        DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_STATUS: 0x%llx\n",
            CSR_XR(xbc_csr_base, UBC_INTERRUPT_STATUS));

        DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
            CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
{
        /*
         * CSR_V IB's interrupt regs (log, enable, status, clear)
         */
        DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
            CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));

        DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
            CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));

        DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
            CSR_XR(csr_base, IMU_INTERRUPT_STATUS));

        DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
            CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ilu_init(caddr_t csr_base, pxu_t *pxu_p)
{
        /*
         * CSR_V ILU's interrupt regs (log, enable, status, clear)
         */
        DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
            CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));

        DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
            CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));

        DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
            CSR_XR(csr_base, ILU_INTERRUPT_STATUS));

        DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
            CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
tlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
        uint64_t val;

        /*
         * CSR_V TLU_CONTROL Expect OBP ???
         */

        /*
         * L0s entry default timer value - 7.0 us
         * Completion timeout select default value - 67.1 ms;
         * OBP will set this value.
         *
         * Configuration - Bit 0 should always be 0 for an upstream port.
         * Bit 1 is clock - how is this related to the clock bit in the TLU
         * Link Control register?  Both are hardware dependent and likely
         * set by OBP.
         *
         * NOTE: Do not set the NPWR_EN bit.  The desired value of this bit
         * will be set by OBP.
         */
        val = CSR_XR(csr_base, TLU_CONTROL);
        val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
            TLU_CONTROL_CONFIG_DEFAULT;

        /*
         * For Oberon, NPWR_EN is set to 0 to prevent PIO reads from blocking
         * behind non-posted PIO writes. This blocking could cause a master or
         * slave timeout on the host bus if multiple serialized PIOs were to
         * suffer Completion Timeouts because the CTO delays for each PIO ahead
         * of the read would accumulate. Since the Olympus processor can have
         * only 1 PIO outstanding, there is no possibility of PIO accesses from
         * a given CPU to a given device being re-ordered by the PCIe fabric;
         * therefore turning off serialization should be safe from a PCIe
         * ordering perspective.
         */
        if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
                val &= ~(1ull << TLU_CONTROL_NPWR_EN);

        /*
         * Set Detect.Quiet. This will disable automatic link re-training
         * if the link goes down, e.g. when power management turns off
         * power to the downstream device. This will enable Fire to go to
         * the Drain state after link down. The Drain state forces a reset
         * of the FC state machine, which is required for proper link
         * re-training.
         */
        val |= (1ull << TLU_REMAIN_DETECT_QUIET);
        CSR_XS(csr_base, TLU_CONTROL, val);
        DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
            CSR_XR(csr_base, TLU_CONTROL));

        /*
         * CSR_V TLU_STATUS Expect HW 0x4
         */

        /*
         * Only bits [7:0] are currently defined.  Bits [2:0]
         * are the state, which should likely be the active state,
         * 100b.  Bit three is 'recovery', which is not understood.
         * All other bits are reserved.
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
            CSR_XR(csr_base, TLU_STATUS));

        /*
         * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
            CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));

        /*
         * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
         */

        /*
         * Ingress credits initial register.  Bits [39:32] should be
         * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
         * be 0xC0.  These are the reset values, and should be set by
         * HW.
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
            CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));

        /*
         * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
         */

        /*
         * Diagnostic register - always zero unless we are debugging.
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
            CSR_XR(csr_base, TLU_DIAGNOSTIC));

        /*
         * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
            CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));

        /*
         * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
            CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));

        /*
         * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
            CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));

        /*
         * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
            CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));

        /*
         * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
            CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));

        /*
         * CSR_V TLU's interrupt regs (log, enable, status, clear)
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
            CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));

        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
            CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));

        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
            CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));

        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
            CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));

        /*
         * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
            CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));

        /*
         * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
            CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));

        /*
         * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
            CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));

        /*
         * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
            CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));

        /*
         * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
            CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));

        /*
         * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
            CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));

        /*
         * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
            CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));

        /*
         * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
            CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));

        /*
         * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
         */

        DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
            CSR_XR(csr_base, TLU_DEBUG_SELECT_A));

        /*
         * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
            CSR_XR(csr_base, TLU_DEBUG_SELECT_B));

        /*
         * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
            CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));

        /*
         * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
         */

        /*
         * Bits [14:12] are the Max Read Request Size, which is always 64
         * bytes (000b).  Bits [7:5] are the Max Payload Size, which starts
         * at 128 bytes (000b).  This may be revisited if init_child finds
         * greater values.
         */
        val = 0x0ull;
        CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
        DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
            CSR_XR(csr_base, TLU_DEVICE_CONTROL));

        /*
         * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
            CSR_XR(csr_base, TLU_DEVICE_STATUS));

        /*
         * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
            CSR_XR(csr_base, TLU_LINK_CAPABILITIES));

        /*
         * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
         */

        /*
         * The CLOCK bit should be set by OBP if the hardware dictates,
         * and if it is set then ASPM should be used, since L0s exit
         * latency should then be lower than L1 exit latency.
         *
         * Note that we will not enable power management during bringup
         * since it has not been tested and is creating some problems in
         * simulation.
         */
        val = (1ull << TLU_LINK_CONTROL_CLOCK);

        CSR_XS(csr_base, TLU_LINK_CONTROL, val);
        DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
            CSR_XR(csr_base, TLU_LINK_CONTROL));

        /*
         * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
         */

        /*
         * Not sure if HW or OBP will be setting this read only
         * register.  Bit 12 is Clock, and it should always be 1
         * signifying that the component uses the same physical
         * clock as the platform.  Bits [9:4] are for the width,
         * with the expected value above signifying a x1 width.
         * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
         * the only speed as yet supported by the PCI-E spec.
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
            CSR_XR(csr_base, TLU_LINK_STATUS));

        /*
         * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
         */

        /*
         * Power Limits for the slots.  Will be platform
         * dependent, and OBP will need to set after consulting
         * with the HW guys.
         *
         * Bits [16:15] are power limit scale, which most likely
         * will be 0b signifying 1x.  Bits [14:7] are the Set
         * Power Limit Value, which is a number which is multiplied
         * by the power limit scale to get the actual power limit.
         */
        DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
            CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));

        /*
         * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
            CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));

        /*
         * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE Expect
         * Kernel 0x17F0110017F011
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
            CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));

        /*
         * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
            CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));

        /*
         * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
            CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));

        /*
         * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
            CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));

        /*
         * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
            CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));

        /*
         * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
            CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));

        /*
         * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
            CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));


        /*
         * CSR_V TLU's CE interrupt regs (log, enable, status, clear)
         * Plus header logs
         */

        /*
         * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
            CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));

        /*
         * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
            CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));

        /*
         * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
            CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));

        /*
         * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
         */
        DBG(DBG_TLU, NULL,
            "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
            CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
}

/* ARGSUSED */
static void
lpu_init(caddr_t csr_base, pxu_t *pxu_p)
{
        /* Variables used to set the ACKNAK Latency Timer and Replay Timer */
        int link_width, max_payload;

        uint64_t val;

        /*
         * Get the Link Width.  See the table above the LINK_WIDTH_ARR_SIZE
         * #define.  Link widths of x1, x4, x8, and x16 are mapped onto the
         * timer table columns below; if any other width is reported, the
         * x1 column is used as the default.
         */
        link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
        DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);

        /*
         * Convert link_width to match timer array configuration.
         */
        switch (link_width) {
        case 1:
                link_width = 0;
                break;
        case 4:
                link_width = 1;
                break;
        case 8:
                link_width = 2;
                break;
        case 16:
                link_width = 3;
                break;
        default:
                link_width = 0;
        }

        /*
         * Get the Max Payload Size.
         * See table above LINK_MAX_PKT_ARR_SIZE #define
         */
        max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
            TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);

        DBG(DBG_LPU, NULL, "lpu_init - Max Payload: %d\n",
            (0x80 << max_payload));

        /* Make sure the packet size is not greater than 4096 */
        max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
            (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;
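
        /*
         * Worked example of the encoding above: the MPS code selects a
         * payload of (0x80 << max_payload) bytes, so code 0 means 128
         * bytes and code 2 means 512 bytes; codes at or beyond
         * LINK_MAX_PKT_ARR_SIZE are clamped to the table's last row so
         * the lookups below stay in bounds.
         */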

        /*
         * CSR_V LPU_ID Expect HW 0x0
         */

        /*
         * This register has link id, phy id and gigablaze id.
         * Should be set by HW.
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
            CSR_XR(csr_base, LPU_ID));

        /*
         * CSR_V LPU_RESET Expect Kernel 0x0
         */

        /*
         * No reason to have any reset bits high until an error is
         * detected on the link.
         */
        val = 0ull;
        CSR_XS(csr_base, LPU_RESET, val);
        DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
            CSR_XR(csr_base, LPU_RESET));

        /*
         * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
         */

        /*
         * Bits [15:8] are Debug B, and bits [7:0] are Debug A.
         * They are read-only.  What do the 8 bits mean, and
         * how do they get set if they are read only?
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
            CSR_XR(csr_base, LPU_DEBUG_STATUS));

        /*
         * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
            CSR_XR(csr_base, LPU_DEBUG_CONFIG));

        /*
         * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
            CSR_XR(csr_base, LPU_LTSSM_CONTROL));

        /*
         * CSR_V LPU_LINK_STATUS Expect HW 0x101
         */

        /*
         * This register has bits [9:4] for link width, and the
         * default, 0x10, means a width of x16.  The problem is
         * that this width is not supported according to the TLU
         * link status register.
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_STATUS));

        /*
         * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
            CSR_XR(csr_base, LPU_INTERRUPT_STATUS));

        /*
         * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
            CSR_XR(csr_base, LPU_INTERRUPT_MASK));

        /*
         * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));

        /*
         * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));

        /*
         * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));

        /*
         * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));

        /*
         * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));

        /*
         * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));

        /*
         * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
         */

        /*
         * This is another place where Max Payload can be set,
         * this time for the link layer.  It will be set to
         * 128B, which is the default, but this will need to
         * be revisited.
         */
        val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
        CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
        DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));

        /*
         * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
         */

        /*
         * Another R/W status register.  Bit 3, DL up Status, will
         * be set high.  The link state machine status bits [2:0]
         * are set to 0x1, but the status bits are not defined in the
         * PRM.  What does 0x1 mean, what other values are possible,
         * and what are their meanings?
         *
         * This register has been giving us problems in simulation.
         * It has been mentioned that software should not program
         * any registers with WE bits except during debug.  So
         * this register will no longer be programmed.
         */

        DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));

        /*
         * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));

        /*
         * CSR_V LPU Link Layer interrupt regs (mask, status)
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));

        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
            CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));

        /*
         * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
         */

        /*
         * The PRM says that only the first two bits will be set
         * high by default, which will enable flow control for
         * posted and non-posted updates, but NOT completion
         * updates.
         */
        val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
            (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
        CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
            CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));

        /*
         * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
         * Expect OBP 0x1D4C
         */

        /*
         * This should be set by OBP.  We'll check to make sure.
         */
        DBG(DBG_LPU, NULL, "lpu_init - "
            "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
            CSR_XR(csr_base,
            LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));

        /*
         * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
         */

        /*
         * This register has Flow Control Update Timer values for
         * non-posted and posted requests, bits [30:16] and bits
         * [14:0], respectively.  These are read-only to SW so
         * either HW or OBP needs to set them.
         */
        DBG(DBG_LPU, NULL, "lpu_init - "
            "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
            CSR_XR(csr_base,
            LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));

        /*
         * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
         */

        /*
         * Same as the timer0 register above, except bits [14:0]
         * hold the timer value for completions.  Read-only to
         * SW; OBP or HW needs to set it.
         */
        DBG(DBG_LPU, NULL, "lpu_init - "
            "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
            CSR_XR(csr_base,
            LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));

        /*
         * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
         */
        val = px_acknak_timer_table[max_payload][link_width];
        CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);

        DBG(DBG_LPU, NULL, "lpu_init - "
            "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));

        /*
         * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));

        /*
         * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
         */
        val = px_replay_timer_table[max_payload][link_width];
        CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));

        /*
         * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));

        /*
         * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));

        /*
         * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
            CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));

        /*
         * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
         */
        val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
            LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
            (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
            LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));

        CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));

        /*
         * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));

        /*
         * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));

        /*
         * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));

        /*
         * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));

        /*
         * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
         */

        /*
         * Test only register.  Will not be programmed.
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));

        /*
         * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
         */

        /*
         * Test only register.  Will not be programmed.
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));

        /*
         * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));

        /*
         * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));

        /*
         * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
         */

        /*
         * Test only register.  Will not be programmed.
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));

        /*
         * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));

        /*
         * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));

        /*
         * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));

        /*
         * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));

        /*
         * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));

        /*
         * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
         */

        /*
         * Test only register.  Will not be programmed.
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));

        /*
         * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
         */

        /*
         * Test only register.  Will not be programmed.
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));

        /*
         * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
         */

        /*
         * Test only register.
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
            CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));

        /*
         * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - "
            "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
            CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));

        /*
         * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
         */

        /*
         * Test only register.
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
            CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));

        /*
         * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
         */

        /*
         * Test only register.
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
            CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));

        /*
         * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
            CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));

        /*
         * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
            CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));

        /*
         * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
            CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));

        /*
         * CSR_V LPU PHY LAYER interrupt regs (mask, status)
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
            CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));

        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
            CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));

        /*
         * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
         */

        /*
         * This also needs some explanation.  What is the best value
         * for the water mark?  Test mode enables which test mode?
         * Programming model needed for the Receiver Reset Lane N
         * bits.
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
            CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));

        /*
         * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
            CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));

        /*
         * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
            CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));

        /*
         * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
            CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));

        /*
         * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
            CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));

        /*
         * CSR_V LPU RX LAYER interrupt regs (mask, status)
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
            CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));

        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
            CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));

        /*
         * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
            CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));

        /*
         * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
            CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));

        /*
         * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
            CSR_XR(csr_base,
            LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));

        /*
         * CSR_V LPU TX LAYER interrupt regs (mask, status)
         */
        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
            CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));

        DBG(DBG_LPU, NULL,
            "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
            CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));

        /*
         * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
            CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));

        /*
         * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
         */

        /*
         * The new PRM has values for LTSSM 8 ns timeout value and
         * LTSSM 20 ns timeout value.  But what do these values mean?
         * Most of the other bits are questions as well.
         *
         * As such we will use the reset value.
         */
        DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
            CSR_XR(csr_base, LPU_LTSSM_CONFIG1));

        /*
         * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
         */

        /*
         * Again, what does '12 ms timeout value' mean?
         */
1369         val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
1370             LPU_LTSSM_CONFIG2_LTSSM_12_TO);
1371         CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
1372         DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
1373             CSR_XR(csr_base, LPU_LTSSM_CONFIG2));
1374 
1375         /*
1376          * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
1377          */
1378         val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
1379             LPU_LTSSM_CONFIG3_LTSSM_2_TO);
1380         CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
1381         DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
1382             CSR_XR(csr_base, LPU_LTSSM_CONFIG3));
1383 
1384         /*
1385          * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
1386          */
1387         val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
1388             LPU_LTSSM_CONFIG4_DATA_RATE) |
1389             (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
1390             LPU_LTSSM_CONFIG4_N_FTS));
1391 
1392         CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
1393         DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
1394             CSR_XR(csr_base, LPU_LTSSM_CONFIG4));
1395 
1396         /*
1397          * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
1398          */
1399         val = 0ull;
1400         CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
1401         DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
1402             CSR_XR(csr_base, LPU_LTSSM_CONFIG5));
1403 
1404         /*
1405          * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
1406          */
1407 
1408         /*
1409          * LTSSM Status registers are test only.
1410          */
1411         DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
1412             CSR_XR(csr_base, LPU_LTSSM_STATUS1));
1413 
1414         /*
1415          * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
1416          */
1417         DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
1418             CSR_XR(csr_base, LPU_LTSSM_STATUS2));
1419 
1420         /*
1421          * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
1422          */
1423         DBG(DBG_LPU, NULL,
1424             "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1425             CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));
1426 
1427         /*
         * CSR_V LPU LTSSM LAYER interrupt regs (mask, status)
1429          */
1430         DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
1431             CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));
1432 
1433         DBG(DBG_LPU, NULL,
1434             "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
1435             CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));
1436 
1437         /*
1438          * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
1439          */
1440         DBG(DBG_LPU, NULL,
1441             "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
1442             CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));
1443 
1444         /*
1445          * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
1446          */
1447         DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
1448             CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));
1449 
1450         /*
1451          * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
1452          */
1453         DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
1454             CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));
1455 
1456         /*
1457          * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
1458          */
1459         DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
1460             CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));
1461 
1462         /*
1463          * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
1464          */
1465         DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
1466             CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));
1467 
1468         /*
1469          * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
1470          */
1471         DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
1472             CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));
1473 
1474         /*
1475          * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
1476          */
1477         DBG(DBG_LPU, NULL, "lpu_init - "
1478             "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
1479             CSR_XR(csr_base,
1480             LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));
1481 
1482         /*
         * CSR_V LPU GIGABLAZE LAYER interrupt regs (mask, status)
1484          */
1485         DBG(DBG_LPU, NULL,
1486             "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
1487             CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));
1488 
1489         DBG(DBG_LPU, NULL,
1490             "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
1491             CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));
1492 
1493         /*
1494          * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
1495          */
1496         DBG(DBG_LPU, NULL,
1497             "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
1498             CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));
1499 
1500         /*
1501          * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
1502          */
1503         DBG(DBG_LPU, NULL,
1504             "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
1505             CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));
1506 
1507         /*
1508          * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
1509          */
1510         DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
1511             CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
1512 }
1513 
1514 /* ARGSUSED */
1515 static void
1516 dlu_init(caddr_t csr_base, pxu_t *pxu_p)
1517 {
        uint64_t val;
1519 
1520         CSR_XS(csr_base, DLU_INTERRUPT_MASK, 0ull);
1521         DBG(DBG_TLU, NULL, "dlu_init - DLU_INTERRUPT_MASK: 0x%llx\n",
1522             CSR_XR(csr_base, DLU_INTERRUPT_MASK));
1523 
1524         val = (1ull << DLU_LINK_LAYER_CONFIG_VC0_EN);
1525         CSR_XS(csr_base, DLU_LINK_LAYER_CONFIG, val);
1526         DBG(DBG_TLU, NULL, "dlu_init - DLU_LINK_LAYER_CONFIG: 0x%llx\n",
1527             CSR_XR(csr_base, DLU_LINK_LAYER_CONFIG));
1528 
1529         val = (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
1530             (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
1531 
1532         CSR_XS(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL, val);
1533         DBG(DBG_TLU, NULL, "dlu_init - DLU_FLOW_CONTROL_UPDATE_CONTROL: "
1534             "0x%llx\n", CSR_XR(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL));
1535 
1536         val = (DLU_TXLINK_REPLAY_TIMER_THRESHOLD_DEFAULT <<
1537             DLU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR);
1538 
1539         CSR_XS(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD, val);
1540 
1541         DBG(DBG_TLU, NULL, "dlu_init - DLU_TXLINK_REPLAY_TIMER_THRESHOLD: "
1542             "0x%llx\n", CSR_XR(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD));
1543 }
1544 
1545 /* ARGSUSED */
1546 static void
1547 dmc_init(caddr_t csr_base, pxu_t *pxu_p)
1548 {
1549         uint64_t val;
1550 
1551 /*
1552  * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect OBP 0x8000000000000003
1553  */
1554 
1555         val = -1ull;
1556         CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
1557         DBG(DBG_DMC, NULL,
1558             "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
1559             CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));
1560 
1561         /*
1562          * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
1563          */
1564         DBG(DBG_DMC, NULL,
1565             "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
1566             CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));
1567 
1568         /*
1569          * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
1570          */
1571         val = 0x0ull;
1572         CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
1573         DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
1574             CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));
1575 
1576         /*
1577          * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
1578          */
1579         val = 0x0ull;
1580         CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
1581         DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
1582             CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
1583 }
1584 
1585 void
1586 hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
1587 {
1588         uint64_t val;
1589 
1590         ilu_init(csr_base, pxu_p);
1591         tlu_init(csr_base, pxu_p);
1592 
1593         switch (PX_CHIP_TYPE(pxu_p)) {
1594         case PX_CHIP_OBERON:
1595                 dlu_init(csr_base, pxu_p);
1596                 break;
1597         case PX_CHIP_FIRE:
1598                 lpu_init(csr_base, pxu_p);
1599                 break;
1600         default:
1601                 DBG(DBG_PEC, NULL, "hvio_pec_init - unknown chip type: 0x%x\n",
1602                     PX_CHIP_TYPE(pxu_p));
1603                 break;
1604         }
1605 
1606         dmc_init(csr_base, pxu_p);
1607 
1608 /*
1609  * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE Expect Kernel 0x800000000000000F
1610  */
1611 
1612         val = -1ull;
1613         CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
1614         DBG(DBG_PEC, NULL,
1615             "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
1616             CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));
1617 
1618         /*
1619          * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
1620          */
1621         DBG(DBG_PEC, NULL,
1622             "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
1623             CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
1624 }
1625 
1626 /*
1627  * Convert a TTE to physical address
1628  */
1629 static r_addr_t
1630 mmu_tte_to_pa(uint64_t tte, pxu_t *pxu_p)
1631 {
1632         uint64_t pa_mask;
1633 
1634         switch (PX_CHIP_TYPE(pxu_p)) {
1635         case PX_CHIP_OBERON:
1636                 pa_mask = MMU_OBERON_PADDR_MASK;
1637                 break;
1638         case PX_CHIP_FIRE:
1639                 pa_mask = MMU_FIRE_PADDR_MASK;
1640                 break;
1641         default:
1642                 DBG(DBG_MMU, NULL, "mmu_tte_to_pa - unknown chip type: 0x%x\n",
1643                     PX_CHIP_TYPE(pxu_p));
1644                 pa_mask = 0;
1645                 break;
1646         }
1647         return ((tte & pa_mask) >> MMU_PAGE_SHIFT);
1648 }
1649 
1650 /*
1651  * Return MMU bypass noncache bit for chip
1652  */
1653 static r_addr_t
1654 mmu_bypass_noncache(pxu_t *pxu_p)
1655 {
1656         r_addr_t bypass_noncache_bit;
1657 
1658         switch (PX_CHIP_TYPE(pxu_p)) {
1659         case PX_CHIP_OBERON:
1660                 bypass_noncache_bit = MMU_OBERON_BYPASS_NONCACHE;
1661                 break;
1662         case PX_CHIP_FIRE:
1663                 bypass_noncache_bit = MMU_FIRE_BYPASS_NONCACHE;
1664                 break;
1665         default:
1666                 DBG(DBG_MMU, NULL,
1667                     "mmu_bypass_nocache - unknown chip type: 0x%x\n",
1668                     PX_CHIP_TYPE(pxu_p));
1669                 bypass_noncache_bit = 0;
1670                 break;
1671         }
1672         return (bypass_noncache_bit);
1673 }
1674 
1675 /*
1676  * Calculate number of TSB entries for the chip.
1677  */
1678 /* ARGSUSED */
1679 static uint_t
1680 mmu_tsb_entries(caddr_t csr_base, pxu_t *pxu_p)
1681 {
1682         uint64_t tsb_ctrl;
1683         uint_t obp_tsb_entries, obp_tsb_size;
1684 
1685         tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);
1686 
1687         obp_tsb_size = tsb_ctrl & 0xF;
1688 
1689         obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);
1690 
1691         return (obp_tsb_entries);
1692 }
1693 
1694 /*
1695  * Initialize the module, but do not enable interrupts.
1696  */
1697 void
1698 hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
1699 {
1700         uint64_t        val, i, obp_tsb_pa;
1701         uint_t obp_tsb_entries;
1702 
1703         bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);
1704 
1705         /*
1706          * Preserve OBP's TSB
1707          */
1708         obp_tsb_pa = CSR_XR(csr_base, MMU_TSB_CONTROL) & MMU_TSB_PA_MASK;
1709 
1710         obp_tsb_entries = mmu_tsb_entries(csr_base, pxu_p);
1711 
1712         /* save "shape" of OBP's TSB for use during Detach */
1713         pxu_p->obp_tsb_paddr = obp_tsb_pa;
1714         pxu_p->obp_tsb_entries = obp_tsb_entries;
1715 
1716         /* For each Valid TTE in OBP's TSB, save its value in px's IOTSB */
1717         hvio_obptsb_attach(pxu_p);
1718 
1719         /*
1720          * Invalidate the TLB through the diagnostic register.
1721          */
1722 
1723         CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);
1724 
1725         /*
1726          * Configure the Fire MMU TSB Control Register.  Determine
1727          * the encoding for either 8KB pages (0) or 64KB pages (1).
1728          *
1729          * Write the most significant 30 bits of the TSB physical address
1730          * and the encoded TSB table size.
1731          */
1732         for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--)
1733                 ;
1734 
1735         val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
1736             ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);
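
        /*
         * Worked example: for a 256KB TSB (tsb_size == 0x40000) the loop
         * above stops at i == 5, since 0x40000 >= (0x2000 << 5) == 0x40000.
         * With 8KB pages (MMU_PAGE_SHIFT == 13) the page-size encoding is 0,
         * so val is the 8KB-aligned TSB PA with 0x5 in the size field.
         */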
1737 
1738         CSR_XS(csr_base, MMU_TSB_CONTROL, val);
1739 
1740         /*
1741          * Enable the MMU, set the "TSB Cache Snoop Enable",
1742          * the "Cache Mode", the "Bypass Enable" and
1743          * the "Translation Enable" bits.
1744          */
1745         val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
1746         val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
1747             |  (MMU_CONTROL_AND_STATUS_ROE_BIT63_ENABLE <<
1748             MMU_CONTROL_AND_STATUS_ROE)
1749             | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
1750             | (1ull << MMU_CONTROL_AND_STATUS_BE)
1751             | (1ull << MMU_CONTROL_AND_STATUS_TE));
1752 
1753         CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);
1754 
1755         /*
1756          * Read the register here to ensure that the previous writes to
1757          * the Fire MMU registers have been flushed.  (Technically, this
1758          * is not entirely necessary here as we will likely do later reads
1759          * during Fire initialization, but it is a small price to pay for
1760          * more modular code.)
1761          */
1762         (void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
1763 
1764         /*
1765          * CSR_V TLU's UE interrupt regs (log, enable, status, clear)
1766          * Plus header logs
1767          */
1768         DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_LOG_ENABLE: 0x%llx\n",
1769             CSR_XR(csr_base, MMU_ERROR_LOG_ENABLE));
1770 
1771         DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_ENABLE: 0x%llx\n",
1772             CSR_XR(csr_base, MMU_INTERRUPT_ENABLE));
1773 
1774         DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_STATUS: 0x%llx\n",
1775             CSR_XR(csr_base, MMU_INTERRUPT_STATUS));
1776 
1777         DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_STATUS_CLEAR: 0x%llx\n",
1778             CSR_XR(csr_base, MMU_ERROR_STATUS_CLEAR));
1779 }
1780 
1781 /*
 * Generic IOMMU Services
1783  */
1784 
1785 /* ARGSUSED */
1786 uint64_t
1787 hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid, pages_t pages,
1788     io_attributes_t io_attr, void *addr, size_t pfn_index, int flags)
1789 {
1790         tsbindex_t      tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1791         uint64_t        attr = MMU_TTE_V;
1792         int             i;
1793 
1794         if (io_attr & PCI_MAP_ATTR_WRITE)
1795                 attr |= MMU_TTE_W;
1796 
1797         if ((PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) &&
1798             (io_attr & PCI_MAP_ATTR_RO))
1799                 attr |= MMU_TTE_RO;
1800 
1801         if (attr & MMU_TTE_RO) {
1802                 DBG(DBG_MMU, NULL, "hvio_iommu_map: pfn_index=0x%x "
1803                     "pages=0x%x attr = 0x%lx\n", pfn_index, pages, attr);
1804         }
1805 
1806         if (flags & MMU_MAP_PFN) {
1807                 ddi_dma_impl_t  *mp = (ddi_dma_impl_t *)addr;
1808                 for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
1809                         px_iopfn_t pfn = PX_GET_MP_PFN(mp, pfn_index);
1810                         pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
1811 
1812                         /*
1813                          * Oberon will need to flush the corresponding TTEs in
1814                          * Cache. We only need to flush every cache line.
1815                          * Extra PIO's are expensive.
1816                          */
1817                         if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1818                                 if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1819                                         CSR_XS(dev_hdl,
1820                                             MMU_TTE_CACHE_FLUSH_ADDRESS,
1821                                             (pxu_p->tsb_paddr+
1822                                             (tsb_index*MMU_TTE_SIZE)));
1823                                 }
1824                         }
1825                 }
1826         } else {
1827                 caddr_t a = (caddr_t)addr;
1828                 for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
1829                         px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);
1830                         pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
1831 
1832                         /*
1833                          * Oberon will need to flush the corresponding TTEs in
1834                          * Cache. We only need to flush every cache line.
1835                          * Extra PIO's are expensive.
1836                          */
1837                         if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1838                                 if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1839                                         CSR_XS(dev_hdl,
1840                                             MMU_TTE_CACHE_FLUSH_ADDRESS,
1841                                             (pxu_p->tsb_paddr+
1842                                             (tsb_index*MMU_TTE_SIZE)));
1843                                 }
1844                         }
1845                 }
1846         }
1847 
1848         return (H_EOK);
1849 }
1850 
1851 /* ARGSUSED */
1852 uint64_t
1853 hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1854     pages_t pages)
1855 {
1856         tsbindex_t      tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1857         int             i;
1858 
1859         for (i = 0; i < pages; i++, tsb_index++) {
1860                 pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;
1861 
1862                         /*
1863                          * Oberon will need to flush the corresponding TTEs in
1864                          * Cache. We only need to flush every cache line.
1865                          * Extra PIO's are expensive.
1866                          */
1867                         if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
1868                                 if ((i == (pages-1))||!((tsb_index+1) & 0x7)) {
1869                                         CSR_XS(dev_hdl,
1870                                             MMU_TTE_CACHE_FLUSH_ADDRESS,
1871                                             (pxu_p->tsb_paddr+
1872                                             (tsb_index*MMU_TTE_SIZE)));
1873                                 }
1874                         }
1875         }
1876 
1877         return (H_EOK);
1878 }
1879 
1880 /* ARGSUSED */
1881 uint64_t
1882 hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
1883     io_attributes_t *attr_p, r_addr_t *r_addr_p)
1884 {
1885         tsbindex_t      tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
1886         uint64_t        *tte_addr;
1887         uint64_t        ret = H_EOK;
1888 
1889         tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;
1890 
1891         if (*tte_addr & MMU_TTE_V) {
1892                 *r_addr_p = mmu_tte_to_pa(*tte_addr, pxu_p);
1893                 *attr_p = (*tte_addr & MMU_TTE_W) ?
1894                     PCI_MAP_ATTR_WRITE:PCI_MAP_ATTR_READ;
1895         } else {
1896                 *r_addr_p = 0;
1897                 *attr_p = 0;
1898                 ret = H_ENOMAP;
1899         }
1900 
1901         return (ret);
1902 }
1903 
1904 /*
1905  * Copy each Valid OBP TTE from OBP's IOTSB to px's IOTSB.
1906  */
1907 void
1908 hvio_obptsb_attach(pxu_t *pxu_p)
1909 {
1910         uint64_t        obp_tsb_pa;
1911         uint64_t        *base_tte_addr;
1912         uint64_t        i;
1913         uint_t          obp_tsb_entries;
1914 
1915         obp_tsb_pa = pxu_p->obp_tsb_paddr;
1916         obp_tsb_entries = pxu_p->obp_tsb_entries;
1917 
1918         /*
1919          * Compute the starting addr of the area reserved for
1920          * OBP's TTEs; OBP's TTEs are stored at the highest addrs
1921          * of px's IOTSB.
1922          */
1923         base_tte_addr = pxu_p->tsb_vaddr +
1924             ((pxu_p->tsb_size >> 3) - obp_tsb_entries);
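
        /*
         * Worked example: tsb_size is in bytes, so (tsb_size >> 3) is the
         * entry count.  For a 32768-entry IOTSB and 1024 OBP entries, the
         * OBP TTEs land at indices 31744..32767.
         */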
1925 
1926         for (i = 0; i < obp_tsb_entries; i++) {
1927                 uint64_t tte = lddphys(obp_tsb_pa + i * 8);
1928 
1929                 if (!MMU_TTE_VALID(tte))
1930                         continue;
1931 
1932                 base_tte_addr[i] = tte;
1933         }
1934 }
1935 
1936 /*
1937  * For each Valid OBP TTE, deallocate space from the vmem Arena used
1938  * to manage the TTE's associated DVMA addr space.  (Allocation from
1939  * the DVMA Arena was done in px_mmu_attach).
1940  */
1941 void
1942 hvio_obptsb_detach(px_t *px_p)
1943 {
1944         uint64_t        obp_tsb_pa;
1945         uint64_t        i;
1946         uint_t          obp_tsb_entries;
1947         uint_t          obp_tsb_bias;
1948         px_mmu_t        *mmu_p = px_p->px_mmu_p;
1949         vmem_t          *dvma_map;
1950         pxu_t           *pxu_p = (pxu_t *)px_p->px_plat_p;
1951 
1952         dvma_map = mmu_p->mmu_dvma_map;
1953 
1954         obp_tsb_pa = pxu_p->obp_tsb_paddr;
1955         obp_tsb_entries = pxu_p->obp_tsb_entries;
1956         /*
1957          * OBP's TTEs are located at the high end of px's IOTSB.
1958          * Equivalently, OBP's DVMA space is allocated at the high end
1959          * of px's DVMA space.  Compute the bias that references
1960          * OBP's first possible page of DVMA space.
1961          */
1962         obp_tsb_bias = (pxu_p->tsb_size >> 3) - obp_tsb_entries;
1963 
1964         for (i = 0; i < obp_tsb_entries; i++) {
1965                 caddr_t va;
1966                 uint64_t tte = lddphys(obp_tsb_pa + i * 8);
1967 
1968                 if (!MMU_TTE_VALID(tte))
1969                         continue;
1970 
1971                 /* deallocate the TTE's associated page of DVMA space */
1972                 va = (caddr_t)(MMU_PTOB(mmu_p->dvma_base_pg + obp_tsb_bias +
1973                     i));
1974                 vmem_xfree(dvma_map, va, MMU_PAGE_SIZE);
1975         }
1976 }
1977 
1978 /* ARGSUSED */
1979 uint64_t
1980 hvio_get_bypass_base(pxu_t *pxu_p)
1981 {
1982         uint64_t base;
1983 
1984         switch (PX_CHIP_TYPE(pxu_p)) {
1985         case PX_CHIP_OBERON:
1986                 base = MMU_OBERON_BYPASS_BASE;
1987                 break;
1988         case PX_CHIP_FIRE:
1989                 base = MMU_FIRE_BYPASS_BASE;
1990                 break;
1991         default:
1992                 DBG(DBG_MMU, NULL,
1993                     "hvio_get_bypass_base - unknown chip type: 0x%x\n",
1994                     PX_CHIP_TYPE(pxu_p));
1995                 base = 0;
1996                 break;
1997         }
1998         return (base);
1999 }
2000 
2001 /* ARGSUSED */
2002 uint64_t
2003 hvio_get_bypass_end(pxu_t *pxu_p)
2004 {
2005         uint64_t end;
2006 
2007         switch (PX_CHIP_TYPE(pxu_p)) {
2008         case PX_CHIP_OBERON:
2009                 end = MMU_OBERON_BYPASS_END;
2010                 break;
2011         case PX_CHIP_FIRE:
2012                 end = MMU_FIRE_BYPASS_END;
2013                 break;
2014         default:
2015                 DBG(DBG_MMU, NULL,
2016                     "hvio_get_bypass_end - unknown chip type: 0x%x\n",
2017                     PX_CHIP_TYPE(pxu_p));
2018                 end = 0;
2019                 break;
2020         }
2021         return (end);
2022 }
2023 
2024 /* ARGSUSED */
2025 uint64_t
2026 hvio_iommu_getbypass(devhandle_t dev_hdl, pxu_t *pxu_p, r_addr_t ra,
2027     io_attributes_t attr, io_addr_t *io_addr_p)
2028 {
2029         uint64_t        pfn = MMU_BTOP(ra);
2030 
2031         *io_addr_p = hvio_get_bypass_base(pxu_p) | ra |
2032             (pf_is_memory(pfn) ? 0 : mmu_bypass_noncache(pxu_p));
2033 
2034         return (H_EOK);
2035 }
2036 
2037 /*
 * Generic IO Interrupt Services
2039  */
2040 
2041 /*
2042  * Converts a device specific interrupt number given by the
2043  * arguments devhandle and devino into a system specific ino.
2044  */
2045 /* ARGSUSED */
2046 uint64_t
2047 hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
2048     sysino_t *sysino)
2049 {
        if (devino >= INTERRUPT_MAPPING_ENTRIES) {
2051                 DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
2052                 return (H_ENOINTR);
2053         }
2054 
2055         *sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);
2056 
2057         return (H_EOK);
2058 }
2059 
2060 /*
2061  * Returns state in intr_valid_state if the interrupt defined by sysino
2062  * is valid (enabled) or not-valid (disabled).
2063  */
2064 uint64_t
2065 hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
2066     intr_valid_state_t *intr_valid_state)
2067 {
2068         if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2069             SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
2070                 *intr_valid_state = INTR_VALID;
2071         } else {
2072                 *intr_valid_state = INTR_NOTVALID;
2073         }
2074 
2075         return (H_EOK);
2076 }
2077 
2078 /*
2079  * Sets the 'valid' state of the interrupt defined by
2080  * the argument sysino to the state defined by the
2081  * argument intr_valid_state.
2082  */
2083 uint64_t
2084 hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
2085     intr_valid_state_t intr_valid_state)
2086 {
2087         switch (intr_valid_state) {
2088         case INTR_VALID:
2089                 CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2090                     SYSINO_TO_DEVINO(sysino), ENTRIES_V);
2091                 break;
2092         case INTR_NOTVALID:
2093                 CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2094                     SYSINO_TO_DEVINO(sysino), ENTRIES_V);
2095                 break;
2096         default:
2097                 return (EINVAL);
2098         }
2099 
2100         return (H_EOK);
2101 }
2102 
2103 /*
2104  * Returns the current state of the interrupt given by the sysino
2105  * argument.
2106  */
2107 uint64_t
2108 hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
2109     intr_state_t *intr_state)
2110 {
2111         intr_state_t state;
2112 
2113         state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
2114             SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);
2115 
2116         switch (state) {
2117         case INTERRUPT_IDLE_STATE:
2118                 *intr_state = INTR_IDLE_STATE;
2119                 break;
2120         case INTERRUPT_RECEIVED_STATE:
2121                 *intr_state = INTR_RECEIVED_STATE;
2122                 break;
2123         case INTERRUPT_PENDING_STATE:
2124                 *intr_state = INTR_DELIVERED_STATE;
2125                 break;
2126         default:
2127                 return (EINVAL);
2128         }
2129 
2130         return (H_EOK);
2131 
2132 }
2133 
2134 /*
2135  * Sets the current state of the interrupt given by the sysino
2136  * argument to the value given in the argument intr_state.
2137  *
2138  * Note: Setting the state to INTR_IDLE clears any pending
2139  * interrupt for sysino.
2140  */
2141 uint64_t
2142 hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
2143     intr_state_t intr_state)
2144 {
2145         intr_state_t state;
2146 
2147         switch (intr_state) {
2148         case INTR_IDLE_STATE:
2149                 state = INTERRUPT_IDLE_STATE;
2150                 break;
2151         case INTR_DELIVERED_STATE:
2152                 state = INTERRUPT_PENDING_STATE;
2153                 break;
2154         default:
2155                 return (EINVAL);
2156         }
2157 
2158         CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
2159             SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);
2160 
2161         return (H_EOK);
2162 }
2163 
2164 /*
2165  * Returns the cpuid that is the current target of the
2166  * interrupt given by the sysino argument.
2167  *
2168  * The cpuid value returned is undefined if the target
2169  * has not been set via intr_settarget.
2170  */
2171 uint64_t
2172 hvio_intr_gettarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
2173     cpuid_t *cpuid)
2174 {
2175         switch (PX_CHIP_TYPE(pxu_p)) {
2176         case PX_CHIP_OBERON:
2177                 *cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2178                     SYSINO_TO_DEVINO(sysino), ENTRIES_T_DESTID);
2179                 break;
2180         case PX_CHIP_FIRE:
2181                 *cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
2182                     SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
2183                 break;
2184         default:
2185                 DBG(DBG_CB, NULL, "hvio_intr_gettarget - "
2186                     "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
2187                 return (EINVAL);
2188         }
2189 
2190         return (H_EOK);
2191 }
2192 
2193 /*
2194  * Set the target cpu for the interrupt defined by the argument
2195  * sysino to the target cpu value defined by the argument cpuid.
2196  */
2197 uint64_t
2198 hvio_intr_settarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
2199     cpuid_t cpuid)
2200 {
2201         uint64_t        val, intr_controller;
2202         uint32_t        ino = SYSINO_TO_DEVINO(sysino);
2203 
2204         /*
2205          * For now, we assign interrupt controller in a round
2206          * robin fashion.  Later, we may need to come up with
2207          * a more efficient assignment algorithm.
2208          */
2209         intr_controller = 0x1ull << (cpuid % 4);
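        /* E.g. cpuid 5 selects interrupt controller 1 (0x1ull << 1 == 0x2). */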
2210 
2211         switch (PX_CHIP_TYPE(pxu_p)) {
2212         case PX_CHIP_OBERON:
2213                 val = (((cpuid &
2214                     INTERRUPT_MAPPING_ENTRIES_T_DESTID_MASK) <<
2215                     INTERRUPT_MAPPING_ENTRIES_T_DESTID) |
2216                     ((intr_controller &
2217                     INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
2218                     << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
2219                 break;
2220         case PX_CHIP_FIRE:
2221                 val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
2222                     INTERRUPT_MAPPING_ENTRIES_T_JPID) |
2223                     ((intr_controller &
2224                     INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
2225                     << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
2226                 break;
2227         default:
2228                 DBG(DBG_CB, NULL, "hvio_intr_settarget - "
2229                     "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
2230                 return (EINVAL);
2231         }
2232 
2233         /* For EQ interrupts, set DATA MONDO bit */
2234         if ((ino >= EQ_1ST_DEVINO) && (ino < (EQ_1ST_DEVINO + EQ_CNT)))
2235                 val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE);
2236 
2237         CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val);
2238 
2239         return (H_EOK);
2240 }
2241 
2242 /*
2243  * MSIQ Functions:
2244  */
2245 uint64_t
2246 hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p)
2247 {
2248         CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0,
2249             (uint64_t)pxu_p->msiq_mapped_p);
2250         DBG(DBG_IB, NULL,
2251             "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n",
2252             CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS));
2253 
2254         CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0,
2255             (uint64_t)ID_TO_IGN(PX_CHIP_TYPE(pxu_p),
2256             pxu_p->portid) << INO_BITS);
2257         DBG(DBG_IB, NULL, "hvio_msiq_init: "
2258             "INTERRUPT_MONDO_DATA_0: 0x%llx\n",
2259             CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0));
2260 
2261         return (H_EOK);
2262 }
2263 
2264 uint64_t
2265 hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
2266     pci_msiq_valid_state_t *msiq_valid_state)
2267 {
2268         uint32_t        eq_state;
2269         uint64_t        ret = H_EOK;
2270 
2271         eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2272             msiq_id, ENTRIES_STATE);
2273 
2274         switch (eq_state) {
2275         case EQ_IDLE_STATE:
2276                 *msiq_valid_state = PCI_MSIQ_INVALID;
2277                 break;
2278         case EQ_ACTIVE_STATE:
2279         case EQ_ERROR_STATE:
2280                 *msiq_valid_state = PCI_MSIQ_VALID;
2281                 break;
2282         default:
2283                 ret = H_EIO;
2284                 break;
2285         }
2286 
2287         return (ret);
2288 }
2289 
2290 uint64_t
2291 hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
2292     pci_msiq_valid_state_t msiq_valid_state)
2293 {
2294         uint64_t        ret = H_EOK;
2295 
2296         switch (msiq_valid_state) {
2297         case PCI_MSIQ_INVALID:
2298                 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
2299                     msiq_id, ENTRIES_DIS);
2300                 break;
2301         case PCI_MSIQ_VALID:
2302                 CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2303                     msiq_id, ENTRIES_EN);
2304                 break;
2305         default:
2306                 ret = H_EINVAL;
2307                 break;
2308         }
2309 
2310         return (ret);
2311 }
2312 
2313 uint64_t
2314 hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
2315     pci_msiq_state_t *msiq_state)
2316 {
2317         uint32_t        eq_state;
2318         uint64_t        ret = H_EOK;
2319 
2320         eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2321             msiq_id, ENTRIES_STATE);
2322 
2323         switch (eq_state) {
2324         case EQ_IDLE_STATE:
2325         case EQ_ACTIVE_STATE:
2326                 *msiq_state = PCI_MSIQ_STATE_IDLE;
2327                 break;
2328         case EQ_ERROR_STATE:
2329                 *msiq_state = PCI_MSIQ_STATE_ERROR;
2330                 break;
2331         default:
2332                 ret = H_EIO;
2333         }
2334 
2335         return (ret);
2336 }
2337 
2338 uint64_t
2339 hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
2340     pci_msiq_state_t msiq_state)
2341 {
2342         uint32_t        eq_state;
2343         uint64_t        ret = H_EOK;
2344 
2345         eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
2346             msiq_id, ENTRIES_STATE);
2347 
2348         switch (eq_state) {
2349         case EQ_IDLE_STATE:
2350                 if (msiq_state == PCI_MSIQ_STATE_ERROR)
2351                         ret = H_EIO;
2352                 break;
2353         case EQ_ACTIVE_STATE:
2354                 if (msiq_state == PCI_MSIQ_STATE_ERROR)
2355                         CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2356                             msiq_id, ENTRIES_ENOVERR);
2357                 else
2358                         ret = H_EIO;
2359                 break;
2360         case EQ_ERROR_STATE:
2361                 if (msiq_state == PCI_MSIQ_STATE_IDLE)
2362                         CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
2363                             msiq_id, ENTRIES_E2I);
2364                 else
2365                         ret = H_EIO;
2366                 break;
2367         default:
2368                 ret = H_EIO;
2369         }
2370 
2371         return (ret);
2372 }
2373 
2374 uint64_t
2375 hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
2376     msiqhead_t *msiq_head)
2377 {
2378         *msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD,
2379             msiq_id, ENTRIES_HEAD);
2380 
2381         return (H_EOK);
2382 }
2383 
2384 uint64_t
2385 hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
2386     msiqhead_t msiq_head)
2387 {
2388         CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id,
2389             ENTRIES_HEAD, msiq_head);
2390 
2391         return (H_EOK);
2392 }
2393 
2394 uint64_t
2395 hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
2396     msiqtail_t *msiq_tail)
2397 {
2398         *msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL,
2399             msiq_id, ENTRIES_TAIL);
2400 
2401         return (H_EOK);
2402 }
2403 
2404 /*
2405  * MSI Functions:
2406  */
2407 uint64_t
2408 hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64)
2409 {
2410         /* PCI MEM 32 resources to perform 32 bit MSI transactions */
2411         CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0,
2412             ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR);
2413         DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n",
2414             CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS));
2415 
2416         /* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */
2417         CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0,
2418             ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR);
2419         DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n",
2420             CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS));
2421 
2422         return (H_EOK);
2423 }
2424 
2425 uint64_t
2426 hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2427     msiqid_t *msiq_id)
2428 {
2429         *msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING,
2430             msi_num, ENTRIES_EQNUM);
2431 
2432         return (H_EOK);
2433 }
2434 
2435 uint64_t
2436 hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
2437     msiqid_t msiq_id)
2438 {
2439         CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2440             ENTRIES_EQNUM, msiq_id);
2441 
2442         return (H_EOK);
2443 }
2444 
2445 uint64_t
2446 hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
2447     pci_msi_valid_state_t *msi_valid_state)
2448 {
2449         *msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2450             msi_num, ENTRIES_V);
2451 
2452         return (H_EOK);
2453 }
2454 
2455 uint64_t
2456 hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
2457     pci_msi_valid_state_t msi_valid_state)
2458 {
2459         uint64_t        ret = H_EOK;
2460 
2461         switch (msi_valid_state) {
2462         case PCI_MSI_VALID:
2463                 CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2464                     ENTRIES_V);
2465                 break;
2466         case PCI_MSI_INVALID:
2467                 CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
2468                     ENTRIES_V);
2469                 break;
2470         default:
2471                 ret = H_EINVAL;
2472         }
2473 
2474         return (ret);
2475 }
2476 
2477 uint64_t
2478 hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
2479     pci_msi_state_t *msi_state)
2480 {
2481         *msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
2482             msi_num, ENTRIES_EQWR_N);
2483 
2484         return (H_EOK);
2485 }
2486 
2487 uint64_t
2488 hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
2489     pci_msi_state_t msi_state)
2490 {
2491         uint64_t        ret = H_EOK;
2492 
2493         switch (msi_state) {
2494         case PCI_MSI_STATE_IDLE:
2495                 CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num,
2496                     ENTRIES_EQWR_N);
2497                 break;
2498         case PCI_MSI_STATE_DELIVERED:
2499         default:
2500                 ret = H_EINVAL;
2501                 break;
2502         }
2503 
2504         return (ret);
2505 }
2506 
2507 /*
2508  * MSG Functions:
2509  */
2510 uint64_t
2511 hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2512     msiqid_t *msiq_id)
2513 {
2514         uint64_t        ret = H_EOK;
2515 
2516         switch (msg_type) {
2517         case PCIE_PME_MSG:
2518                 *msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM);
2519                 break;
2520         case PCIE_PME_ACK_MSG:
2521                 *msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING,
2522                     EQNUM);
2523                 break;
2524         case PCIE_CORR_MSG:
2525                 *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM);
2526                 break;
2527         case PCIE_NONFATAL_MSG:
2528                 *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING,
2529                     EQNUM);
2530                 break;
2531         case PCIE_FATAL_MSG:
2532                 *msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM);
2533                 break;
2534         default:
2535                 ret = H_EINVAL;
2536                 break;
2537         }
2538 
2539         return (ret);
2540 }
2541 
2542 uint64_t
2543 hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2544     msiqid_t msiq_id)
2545 {
2546         uint64_t        ret = H_EOK;
2547 
2548         switch (msg_type) {
2549         case PCIE_PME_MSG:
2550                 CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id);
2551                 break;
2552         case PCIE_PME_ACK_MSG:
2553                 CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id);
2554                 break;
2555         case PCIE_CORR_MSG:
2556                 CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id);
2557                 break;
2558         case PCIE_NONFATAL_MSG:
2559                 CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id);
2560                 break;
2561         case PCIE_FATAL_MSG:
2562                 CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id);
2563                 break;
2564         default:
2565                 ret = H_EINVAL;
2566                 break;
2567         }
2568 
2569         return (ret);
2570 }
2571 
2572 uint64_t
2573 hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2574     pcie_msg_valid_state_t *msg_valid_state)
2575 {
2576         uint64_t        ret = H_EOK;
2577 
2578         switch (msg_type) {
2579         case PCIE_PME_MSG:
2580                 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2581                 break;
2582         case PCIE_PME_ACK_MSG:
2583                 *msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2584                     PME_TO_ACK_MAPPING, V);
2585                 break;
2586         case PCIE_CORR_MSG:
2587                 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2588                 break;
2589         case PCIE_NONFATAL_MSG:
2590                 *msg_valid_state = CSR_BR((caddr_t)dev_hdl,
2591                     ERR_NONFATAL_MAPPING, V);
2592                 break;
2593         case PCIE_FATAL_MSG:
2594                 *msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING,
2595                     V);
2596                 break;
2597         default:
2598                 ret = H_EINVAL;
2599                 break;
2600         }
2601 
2602         return (ret);
2603 }
2604 
2605 uint64_t
2606 hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
2607     pcie_msg_valid_state_t msg_valid_state)
2608 {
2609         uint64_t        ret = H_EOK;
2610 
2611         switch (msg_valid_state) {
2612         case PCIE_MSG_VALID:
2613                 switch (msg_type) {
2614                 case PCIE_PME_MSG:
2615                         CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2616                         break;
2617                 case PCIE_PME_ACK_MSG:
2618                         CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2619                         break;
2620                 case PCIE_CORR_MSG:
2621                         CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2622                         break;
2623                 case PCIE_NONFATAL_MSG:
2624                         CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2625                         break;
2626                 case PCIE_FATAL_MSG:
2627                         CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2628                         break;
2629                 default:
2630                         ret = H_EINVAL;
2631                         break;
2632                 }
2633 
2634                 break;
2635         case PCIE_MSG_INVALID:
2636                 switch (msg_type) {
2637                 case PCIE_PME_MSG:
2638                         CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V);
2639                         break;
2640                 case PCIE_PME_ACK_MSG:
2641                         CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
2642                         break;
2643                 case PCIE_CORR_MSG:
2644                         CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
2645                         break;
2646                 case PCIE_NONFATAL_MSG:
2647                         CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
2648                         break;
2649                 case PCIE_FATAL_MSG:
2650                         CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
2651                         break;
2652                 default:
2653                         ret = H_EINVAL;
2654                         break;
2655                 }
2656                 break;
2657         default:
2658                 ret = H_EINVAL;
2659         }
2660 
2661         return (ret);
2662 }
2663 
2664 /*
2665  * Suspend/Resume Functions:
2666  *      (pec, mmu, ib)
2667  *      cb
 * All registers saved here have already been touched in the XXX_init
 * functions above.
2669  */
2670 uint64_t
2671 hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2672 {
2673         uint64_t        *config_state;
2674         int             total_size;
2675         int             i;
2676 
2677         if (msiq_suspend(dev_hdl, pxu_p) != H_EOK)
2678                 return (H_EIO);
2679 
2680         total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2681         config_state = kmem_zalloc(total_size, KM_NOSLEEP);
2682 
2683         if (config_state == NULL) {
2684                 return (H_EIO);
2685         }
2686 
2687         /*
2688          * Soft state for suspend/resume  from pxu_t
2689          * uint64_t     *pec_config_state;
2690          * uint64_t     *mmu_config_state;
2691          * uint64_t     *ib_intr_map;
2692          * uint64_t     *ib_config_state;
2693          * uint64_t     *xcb_config_state;
2694          */
2695 
2696         /* Save the PEC configuration states */
2697         pxu_p->pec_config_state = config_state;
2698         for (i = 0; i < PEC_KEYS; i++) {
2699                 if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
2700                     (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
2701                         pxu_p->pec_config_state[i] =
2702                             CSR_XR((caddr_t)dev_hdl,
2703                             pec_config_state_regs[i].reg);
2704                 }
2705         }
2706 
2707         /* Save the MMU configuration states */
2708         pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS;
2709         for (i = 0; i < MMU_KEYS; i++) {
2710                 pxu_p->mmu_config_state[i] =
2711                     CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]);
2712         }
2713 
2714         /* Save the interrupt mapping registers */
2715         pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS;
2716         for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2717                 pxu_p->ib_intr_map[i] =
2718                     CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i);
2719         }
2720 
2721         /* Save the IB configuration states */
2722         pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES;
2723         for (i = 0; i < IB_KEYS; i++) {
2724                 pxu_p->ib_config_state[i] =
2725                     CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]);
2726         }
2727 
2728         return (H_EOK);
2729 }
2730 
2731 void
2732 hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
2733 {
2734         int             total_size;
2735         sysino_t        sysino;
2736         int             i;
2737         uint64_t        ret;
2738 
2739         /* Make sure that suspend actually did occur */
2740         if (!pxu_p->pec_config_state) {
2741                 return;
2742         }
2743 
2744         /* Restore IB configuration states */
2745         for (i = 0; i < IB_KEYS; i++) {
2746                 CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i],
2747                     pxu_p->ib_config_state[i]);
2748         }
2749 
2750         /*
2751          * Restore the interrupt mapping registers
2752          * And make sure the intrs are idle.
2753          */
2754         for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
2755                 CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i,
2756                     ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE);
2757                 CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i,
2758                     pxu_p->ib_intr_map[i]);
2759         }
2760 
2761         /* Restore MMU configuration states */
2762         /* Clear the cache. */
2763         CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull);
2764 
2765         for (i = 0; i < MMU_KEYS; i++) {
2766                 CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i],
2767                     pxu_p->mmu_config_state[i]);
2768         }
2769 
2770         /* Restore PEC configuration states */
2771         /* Make sure all reset bits are low until error is detected */
2772         CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);
2773 
2774         for (i = 0; i < PEC_KEYS; i++) {
2775                 if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
2776                     (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
2777                         CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i].reg,
2778                             pxu_p->pec_config_state[i]);
2779                 }
2780         }
2781 
2782         /* Enable PCI-E interrupt */
2783         if ((ret = hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino,
2784             &sysino)) != H_EOK) {
2785                 cmn_err(CE_WARN,
2786                     "hvio_resume: hvio_intr_devino_to_sysino failed, "
2787                     "ret 0x%lx", ret);
2788         }
2789 
        if ((ret = hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE))
2791             != H_EOK) {
2792                 cmn_err(CE_WARN,
2793                     "hvio_resume: hvio_intr_setstate failed, "
2794                     "ret 0x%lx", ret);
2795         }
2796 
2797         total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
2798         kmem_free(pxu_p->pec_config_state, total_size);
2799 
2800         pxu_p->pec_config_state = NULL;
2801         pxu_p->mmu_config_state = NULL;
2802         pxu_p->ib_config_state = NULL;
2803         pxu_p->ib_intr_map = NULL;
2804 
2805         msiq_resume(dev_hdl, pxu_p);
2806 }
2807 
2808 uint64_t
2809 hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2810 {
2811         uint64_t *config_state, *cb_regs;
2812         int i, cb_size, cb_keys;
2813 
2814         switch (PX_CHIP_TYPE(pxu_p)) {
2815         case PX_CHIP_OBERON:
2816                 cb_size = UBC_SIZE;
2817                 cb_keys = UBC_KEYS;
2818                 cb_regs = ubc_config_state_regs;
2819                 break;
2820         case PX_CHIP_FIRE:
2821                 cb_size = JBC_SIZE;
2822                 cb_keys = JBC_KEYS;
2823                 cb_regs = jbc_config_state_regs;
2824                 break;
        default:
                DBG(DBG_CB, NULL, "hvio_cb_suspend - unknown chip type: 0x%x\n",
                    PX_CHIP_TYPE(pxu_p));
                /* cb_size/cb_keys/cb_regs would be used uninitialized */
                return (H_EIO);
        }
2830 
2831         config_state = kmem_zalloc(cb_size, KM_NOSLEEP);
2832 
2833         if (config_state == NULL) {
2834                 return (H_EIO);
2835         }
2836 
2837         /* Save the configuration states */
2838         pxu_p->xcb_config_state = config_state;
2839         for (i = 0; i < cb_keys; i++) {
2840                 pxu_p->xcb_config_state[i] =
2841                     CSR_XR((caddr_t)dev_hdl, cb_regs[i]);
2842         }
2843 
2844         return (H_EOK);
2845 }
2846 
2847 void
2848 hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
2849     devino_t devino, pxu_t *pxu_p)
2850 {
2851         sysino_t sysino;
2852         uint64_t *cb_regs;
2853         int i, cb_size, cb_keys;
2854         uint64_t ret;
2855 
2856         switch (PX_CHIP_TYPE(pxu_p)) {
2857         case PX_CHIP_OBERON:
2858                 cb_size = UBC_SIZE;
2859                 cb_keys = UBC_KEYS;
2860                 cb_regs = ubc_config_state_regs;
2861                 /*
2862                  * No reason to have any reset bits high until an error is
2863                  * detected on the link.
2864                  */
2865                 CSR_XS((caddr_t)xbus_dev_hdl, UBC_ERROR_STATUS_CLEAR, -1ull);
2866                 break;
2867         case PX_CHIP_FIRE:
2868                 cb_size = JBC_SIZE;
2869                 cb_keys = JBC_KEYS;
2870                 cb_regs = jbc_config_state_regs;
2871                 /*
2872                  * No reason to have any reset bits high until an error is
2873                  * detected on the link.
2874                  */
2875                 CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
2876                 break;
        default:
                DBG(DBG_CB, NULL, "hvio_cb_resume - unknown chip type: 0x%x\n",
                    PX_CHIP_TYPE(pxu_p));
                /* cb_regs/cb_keys would be used uninitialized */
                return;
        }
2882 
2883         ASSERT(pxu_p->xcb_config_state);
2884 
2885         /* Restore the configuration states */
2886         for (i = 0; i < cb_keys; i++) {
2887                 CSR_XS((caddr_t)xbus_dev_hdl, cb_regs[i],
2888                     pxu_p->xcb_config_state[i]);
2889         }
2890 
2891         /* Enable XBC interrupt */
2892         if ((ret = hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino,
2893             &sysino)) != H_EOK) {
2894                 cmn_err(CE_WARN,
2895                     "hvio_cb_resume: hvio_intr_devino_to_sysino failed, "
2896                     "ret 0x%lx", ret);
2897         }
2898 
2899         if ((ret = hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE))
2900             != H_EOK) {
2901                 cmn_err(CE_WARN,
2902                     "hvio_cb_resume: hvio_intr_setstate failed, "
2903                     "ret 0x%lx", ret);
2904         }
2905 
2906         kmem_free(pxu_p->xcb_config_state, cb_size);
2907 
2908         pxu_p->xcb_config_state = NULL;
2909 }
2910 
2911 static uint64_t
2912 msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
2913 {
2914         size_t  bufsz;
2915         volatile uint64_t *cur_p;
2916         int i;
2917 
2918         bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2919         if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) ==
2920             NULL)
2921                 return (H_EIO);
2922 
2923         cur_p = pxu_p->msiq_config_state;
2924 
2925         /* Save each EQ state */
2926         for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++)
2927                 *cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i);
2928 
2929         /* Save MSI mapping registers */
2930         for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2931                 *cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i);
2932 
2933         /* Save all other MSIQ registers */
2934         for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2935                 *cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]);
2936         return (H_EOK);
2937 }
2938 
2939 static void
2940 msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p)
2941 {
2942         size_t  bufsz;
2943         uint64_t *cur_p, state;
2944         int i;
2945         uint64_t ret;
2946 
2947         bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
2948         cur_p = pxu_p->msiq_config_state;
2949         /*
2950          * Initialize EQ base address register and
2951          * Interrupt Mondo Data 0 register.
2952          */
2953         if ((ret = hvio_msiq_init(dev_hdl, pxu_p)) != H_EOK) {
2954                 cmn_err(CE_WARN,
2955                     "msiq_resume: hvio_msiq_init failed, "
2956                     "ret 0x%lx", ret);
2957         }
2958 
2959         /* Restore EQ states */
2960         for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) {
2961                 state = (*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK;
2962                 if ((state == EQ_ACTIVE_STATE) || (state == EQ_ERROR_STATE))
2963                         CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
2964                             i, ENTRIES_EN);
2965         }
2966 
2967         /* Restore MSI mapping */
2968         for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
2969                 CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p);
2970 
2971         /*
2972          * Restore all other registers. MSI 32 bit address and
2973          * MSI 64 bit address are restored as part of this.
2974          */
2975         for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
2976                 CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p);
2977 
2978         kmem_free(pxu_p->msiq_config_state, bufsz);
2979         pxu_p->msiq_config_state = NULL;
2980 }
2981 
2982 /*
2983  * Send a PME_Turn_Off message to move the link to the L2/L3 Ready state.
2984  * Called by px_goto_l23ready().
2985  * Returns DDI_SUCCESS or DDI_FAILURE.
2986  */
2987 int
2988 px_send_pme_turnoff(caddr_t csr_base)
2989 {
2990         volatile uint64_t reg;
2991 
2992         reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);
2993         /* If already pending, return failure */
2994         if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
2995                 DBG(DBG_PWR, NULL, "send_pme_turnoff: pending PTO bit "
2996                     "tlu_pme_turn_off_generate = %lx\n", reg);
2997                 return (DDI_FAILURE);
2998         }
2999 
3000         /* Write to the PME_Turn_Off generate register to broadcast */
3001         reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
3002         CSR_XS(csr_base,  TLU_PME_TURN_OFF_GENERATE, reg);
3003 
3004         return (DDI_SUCCESS);
3005 }
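/*
 * A minimal sketch of the expected calling sequence, assuming the
 * caller then polls for L1 idle (px_goto_l23ready implements the real
 * logic):
 *
 *	if (px_send_pme_turnoff(csr_base) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	(void) px_link_wait4l1idle(csr_base);
 */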
3006 
3007 /*
3008  * Check whether the link is in the L1 idle state.
3009  * Returns
3010  * DDI_SUCCESS - if the link is in L1 idle
3011  * DDI_FAILURE - if the link is not in L1 idle
3012  */
3013 int
3014 px_link_wait4l1idle(caddr_t csr_base)
3015 {
3016         uint8_t ltssm_state;
3017         int ntries = px_max_l1_tries;
3018 
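        /*
         * Poll the LTSSM state, sleeping one clock tick between reads,
         * for at most px_max_l1_tries attempts (a driver tunable).
         */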
3019         while (ntries > 0) {
3020                 ltssm_state = CSR_FR(csr_base, LPU_LTSSM_STATUS1, LTSSM_STATE);
3021                 if (ltssm_state == LPU_LTSSM_L1_IDLE || (--ntries <= 0))
3022                         break;
3023                 delay(1);
3024         }
3025         DBG(DBG_PWR, NULL, "check_for_l1idle: ltssm_state %x\n", ltssm_state);
3026         return ((ltssm_state == LPU_LTSSM_L1_IDLE) ? DDI_SUCCESS : DDI_FAILURE);
3027 }
3028 
3029 /*
3030  * Transition the link back to L0 after it has gone down.
3031  */
3032 int
3033 px_link_retrain(caddr_t csr_base)
3034 {
3035         volatile uint64_t reg;
3036 
3037         reg = CSR_XR(csr_base, TLU_CONTROL);
3038         if (!(reg & (1ull << TLU_REMAIN_DETECT_QUIET))) {
3039                 DBG(DBG_PWR, NULL, "retrain_link: detect.quiet bit not set\n");
3040                 return (DDI_FAILURE);
3041         }
3042 
3043         /* Clear link down bit in TLU Other Event Clear Status Register. */
3044         CSR_BS(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR, LDN_P);
3045 
3046         /* Clear Drain bit in TLU Status Register */
3047         CSR_BS(csr_base, TLU_STATUS, DRAIN);
3048 
3049         /* Clear Remain in Detect.Quiet bit in TLU Control Register */
3050         reg = CSR_XR(csr_base, TLU_CONTROL);
3051         reg &= ~(1ull << TLU_REMAIN_DETECT_QUIET);
3052         CSR_XS(csr_base, TLU_CONTROL, reg);
3053 
3054         return (DDI_SUCCESS);
3055 }
3056 
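/*
 * Set the Remain-in-Detect.Quiet bit so the LTSSM stays in
 * Detect.Quiet; px_link_retrain() clears the bit again to let the link
 * train back up.
 */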
3057 void
3058 px_enable_detect_quiet(caddr_t csr_base)
3059 {
3060         volatile uint64_t tlu_ctrl;
3061 
3062         tlu_ctrl = CSR_XR(csr_base, TLU_CONTROL);
3063         tlu_ctrl |= (1ull << TLU_REMAIN_DETECT_QUIET);
3064         CSR_XS(csr_base, TLU_CONTROL, tlu_ctrl);
3065 }
3066 
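/*
 * Power on the Oberon hot-plug slot: check leaf reset, hot-plug
 * capability and slot status, apply slot power and clock, train the
 * link (retrying if needed), then restore LUP/LDN event logging and
 * notify SCF.  Returns DDI_SUCCESS or (uint_t)DDI_FAILURE.
 */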
3067 static uint_t
3068 oberon_hp_pwron(caddr_t csr_base)
3069 {
3070         volatile uint64_t reg;
3071         boolean_t link_retry, link_up;
3072         int loop, i;
3073 
3074         DBG(DBG_HP, NULL, "oberon_hp_pwron: powering on the slot\n");
3075 
3076         /* Check Leaf Reset status */
3077         reg = CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE);
3078         if (!(reg & (1ull << ILU_ERROR_LOG_ENABLE_SPARE3))) {
3079                 DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not reset\n");
3080                 goto fail;
3081         }
3082 
3083         /* Check HP Capable */
3084         if (!CSR_BR(csr_base, TLU_SLOT_CAPABILITIES, HP)) {
3085                 DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not "
3086                     "hot-pluggable\n");
3087                 goto fail;
3088         }
3089 
3090         /* Check Slot status */
3091         reg = CSR_XR(csr_base, TLU_SLOT_STATUS);
3092         if (!(reg & (1ull << TLU_SLOT_STATUS_PSD)) ||
3093             (reg & (1ull << TLU_SLOT_STATUS_MRLS))) {
3094                 DBG(DBG_HP, NULL, "oberon_hp_pwron fails: slot status %lx\n",
3095                     reg);
3096                 goto fail;
3097         }
3098 
3099         /* Blink the power LED; pciehpc has already done this */
3100 
3101         /* Turn on slot power */
3102         CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);
3103 
3104         /* Self-test power-fault detection by toggling slot power */
3105         delay(drv_usectohz(25000));
3106         CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3107         CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3108 
3109         /* wait to check power state */
3110         delay(drv_usectohz(25000));
3111 
3112         if (!CSR_BR(csr_base, TLU_SLOT_STATUS, PWFD)) {
3113                 DBG(DBG_HP, NULL, "oberon_hp_pwron fails: PWFD self-test\n");
3114                 goto fail1;
3115         }
3116 
3117         /* Power is good: re-enable it and arm power-fault detection */
3118         CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);
3119 
3120         delay(drv_usectohz(25000));
3121         CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3122         CSR_BS(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3123 
3124         /* Turn on slot clock */
3125         CSR_BS(csr_base, HOTPLUG_CONTROL, CLKEN);
3126 
3127         link_up = B_FALSE;
3128         link_retry = B_FALSE;
3129 
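        /*
         * Train the link, retrying up to link_retry_count times.  Each
         * retry disables the port, asserts PERST#, then releases reset
         * and re-enables the port before re-reading the DLU link layer
         * status.
         */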
3130         for (loop = 0; (loop < link_retry_count) && (link_up == B_FALSE);
3131             loop++) {
3132                 if (link_retry == B_TRUE) {
3133                         DBG(DBG_HP, NULL, "oberon_hp_pwron : retry link loop "
3134                             "%d\n", loop);
3135                         CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
3136                         CSR_XS(csr_base, FLP_PORT_CONTROL, 0x1);
3137                         delay(drv_usectohz(10000));
3138                         CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS);
3139                         CSR_BS(csr_base, TLU_DIAGNOSTIC, IFC_DIS);
3140                         CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3141                         delay(drv_usectohz(50000));
3142                 }
3143 
3144                 /* Release PCI-E Reset */
3145                 delay(drv_usectohz(wait_perst));
3146                 CSR_BS(csr_base, HOTPLUG_CONTROL, N_PERST);
3147 
3148                 /*
3149                  * Unmask the hot-plug events.
3150                  * This should already be done by pciehpc.
3151                  */
3152 
3153                 /* Enable PCIE port */
3154                 delay(drv_usectohz(wait_enable_port));
3155                 CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
3156                 CSR_XS(csr_base, FLP_PORT_CONTROL, 0x20);
3157 
3158                 /* Wait for link up: FC init done, DLUP set, DL_ACTIVE */
3159                 /* BEGIN CSTYLED */
3160                 for (i = 0; (i < 2) && (link_up == B_FALSE); i++) {
3161                         delay(drv_usectohz(link_status_check));
3162                         reg = CSR_XR(csr_base, DLU_LINK_LAYER_STATUS);
3163 
3164                         if ((((reg >> DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS) &
3165                             DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_MASK) ==
3166                             DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_FC_INIT_DONE) &&
3167                             (reg & (1ull << DLU_LINK_LAYER_STATUS_DLUP_STS)) &&
3168                             ((reg &
3169                             DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_MASK) ==
3170                             DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_DL_ACTIVE)) {
3171                                 DBG(DBG_HP, NULL, "oberon_hp_pwron : "
3172                                     "link is up\n");
3173                                 link_up = B_TRUE;
3174                         } else
3175                                 link_retry = B_TRUE;
3176 
3177                 }
3178                 /* END CSTYLED */
3179         }
3180 
3181         if (link_up == B_FALSE) {
3182                 DBG(DBG_HP, NULL, "oberon_hp_pwron fails to enable "
3183                     "PCI-E port\n");
3184                 goto fail2;
3185         }
3186 
3187         /* link is up */
3188         CSR_BC(csr_base, TLU_DIAGNOSTIC, IFC_DIS);
3189         CSR_BS(csr_base, FLP_PORT_ACTIVE_STATUS, TRAIN_ERROR);
3190         CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_P);
3191         CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_S);
3192         CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS);
3193 
3194         /* Re-enable LUP/LDN event logging per px_tlu_oe_log_mask */
3195         reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE);
3196         if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P))
3197                 reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P;
3198         if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P))
3199                 reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P;
3200         if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S))
3201                 reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S;
3202         if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S))
3203                 reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S;
3204         CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg);
3205 
3206         /*
3207          * Initialize leaf: set the Slot Power Limit to 25 W,
3208          * i.e. SPLS = 00b (1.0x scale) and SPLV = 11001b (25).
3209          */
3210         reg = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
3211         reg &= ~(TLU_SLOT_CAPABILITIES_SPLS_MASK <<
3212             TLU_SLOT_CAPABILITIES_SPLS);
3213         reg &= ~(TLU_SLOT_CAPABILITIES_SPLV_MASK <<
3214             TLU_SLOT_CAPABILITIES_SPLV);
3215         reg |= (0x19 << TLU_SLOT_CAPABILITIES_SPLV);
3216         CSR_XS(csr_base, TLU_SLOT_CAPABILITIES, reg);
3217 
3218         /* Turn on Power LED */
3219         reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3220         reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3221         reg = pcie_slotctl_pwr_indicator_set(reg,
3222             PCIE_SLOTCTL_INDICATOR_STATE_ON);
3223         CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3224 
3225         /* Notify SCF by toggling SLOTPON */
3226         if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
3227                 CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
3228         else
3229                 CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);
3230 
3231         /* Wait for one second */
3232         delay(drv_sectohz(1));
3233 
3234         return (DDI_SUCCESS);
3235 
3236 fail2:
3237         /* Link-up failed; back out the power-on sequence */
3238         CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
3239         CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3240         delay(drv_usectohz(150));
3241 
3242         CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
3243         delay(drv_usectohz(100));
3244 
3245 fail1:
3246         CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3247 
3248         CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3249 
3250         reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3251         reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3252         reg = pcie_slotctl_pwr_indicator_set(reg,
3253             PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3254         CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3255 
3256         CSR_BC(csr_base, TLU_SLOT_STATUS, PWFD);
3257 
3258 fail:
3259         return ((uint_t)DDI_FAILURE);
3260 }
3261 
3262 hrtime_t oberon_leaf_reset_timeout = 120ll * NANOSEC;   /* 120 seconds */
3263 
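/*
 * Power off the Oberon hot-plug slot: mask LUP/LDN events, quiesce and
 * disable the port, assert PERST#, stop the slot clock, drop slot
 * power, then wait (up to oberon_leaf_reset_timeout) for SCF to
 * complete the leaf reset, signalled through the SPARE3 bit.
 */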
3264 static uint_t
3265 oberon_hp_pwroff(caddr_t csr_base)
3266 {
3267         volatile uint64_t reg;
3268         volatile uint64_t reg_tluue, reg_tluce;
3269         hrtime_t start_time, end_time;
3270 
3271         DBG(DBG_HP, NULL, "oberon_hp_pwroff: powering off the slot\n");
3272 
3273         /* Blink the power LED; pciehpc has already done this */
3274 
3275         /* Clear Slot Event */
3276         CSR_BS(csr_base, TLU_SLOT_STATUS, PSDC);
3277         CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3278 
3279         /* DRN_TR_DIS on */
3280         CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
3281         delay(drv_usectohz(10000));
3282 
3283         /* Disable LUP/LDN */
3284         reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE);
3285         reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
3286             (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
3287             (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
3288             (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
3289         CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg);
3290 
3291         /* Save the TLU registers */
3292         reg_tluue = CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE);
3293         reg_tluce = CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE);
3294         /* All clear */
3295         CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, 0);
3296         CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, 0);
3297 
3298         /* Disable port */
3299         CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
3300 
3301         /* PCIE reset */
3302         delay(drv_usectohz(10000));
3303         CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
3304 
3305         /* PCIE clock stop */
3306         delay(drv_usectohz(150));
3307         CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
3308 
3309         /* Turn off slot power */
3310         delay(drv_usectohz(100));
3311         CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
3312         CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3313         delay(drv_usectohz(25000));
3314         CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
3315 
3316         /* Clear SPARE3 (bit 7) of the ILU Error Log Enable Register */
3317         CSR_BC(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3);
3318 
3319         /* Restore the saved TLU registers */
3320         CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, reg_tluue);
3321         CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, reg_tluce);
3322 
3323         /* Power LED off */
3324         reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3325         reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
3326         reg = pcie_slotctl_pwr_indicator_set(reg,
3327             PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3328         CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3329 
3330         /* Indicator LED blink */
3331         reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3332         reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
3333         reg = pcie_slotctl_attn_indicator_set(reg,
3334             PCIE_SLOTCTL_INDICATOR_STATE_BLINK);
3335         CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3336 
3337         /* Notify SCF by toggling SLOTPON */
3338         if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
3339                 CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
3340         else
3341                 CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);
3342 
3343         start_time = gethrtime();
3344         /* Check Leaf Reset status */
3345         while (!(CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))) {
3346                 if ((end_time = (gethrtime() - start_time)) >
3347                     oberon_leaf_reset_timeout) {
3348                         cmn_err(CE_WARN, "Oberon leaf reset did not "
3349                             "complete after waiting %llx ns", end_time);
3350 
3351                         break;
3352                 }
3353 
3354                 /* Wait for one second */
3355                 delay(drv_sectohz(1));
3356         }
3357 
3358         /* Indicator LED off */
3359         reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3360         reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
3361         reg = pcie_slotctl_attn_indicator_set(reg,
3362             PCIE_SLOTCTL_INDICATOR_STATE_OFF);
3363         CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);
3364 
3365         return (DDI_SUCCESS);
3366 }
3367 
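/*
 * Virtual hot-plug register read op handed to pciehpc.  The slot power
 * state is not carried in TLU_SLOT_CONTROL itself, so the
 * PCIE_SLOTCTL_PWR_CONTROL bit is synthesized from
 * HOTPLUG_CONTROL.PWREN.
 */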
3368 static uint_t
3369 oberon_hpreg_get(void *cookie, off_t off)
3370 {
3371         caddr_t csr_base = *(caddr_t *)cookie;
3372         volatile uint64_t val = -1ull;
3373 
3374         switch (off) {
3375         case PCIE_SLOTCAP:
3376                 val = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
3377                 break;
3378         case PCIE_SLOTCTL:
3379                 val = CSR_XR(csr_base, TLU_SLOT_CONTROL);
3380 
3381                 /* Get the power state */
3382                 val |= (CSR_XR(csr_base, HOTPLUG_CONTROL) &
3383                     (1ull << HOTPLUG_CONTROL_PWREN)) ?
3384                     0 : PCIE_SLOTCTL_PWR_CONTROL;
3385                 break;
3386         case PCIE_SLOTSTS:
3387                 val = CSR_XR(csr_base, TLU_SLOT_STATUS);
3388                 break;
3389         case PCIE_LINKCAP:
3390                 val = CSR_XR(csr_base, TLU_LINK_CAPABILITIES);
3391                 break;
3392         case PCIE_LINKSTS:
3393                 val = CSR_XR(csr_base, TLU_LINK_STATUS);
3394                 break;
3395         default:
3396                 DBG(DBG_HP, NULL, "oberon_hpreg_get(): "
3397                     "unsupported offset 0x%lx\n", off);
3398                 break;
3399         }
3400 
3401         return ((uint_t)val);
3402 }
3403 
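/*
 * Virtual hot-plug register write op.  A write that flips the Slot
 * Control power-control bit triggers the full Oberon power-on or
 * power-off sequence above; other Slot Control writes go straight to
 * the TLU register.
 */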
3404 static uint_t
3405 oberon_hpreg_put(void *cookie, off_t off, uint_t val)
3406 {
3407         caddr_t csr_base = *(caddr_t *)cookie;
3408         volatile uint64_t pwr_state_on, pwr_fault;
3409         uint_t pwr_off, ret = DDI_SUCCESS;
3410 
3411         DBG(DBG_HP, NULL, "oberon_hpreg_put 0x%lx: cur %x, new %x\n",
3412             off, oberon_hpreg_get(cookie, off), val);
3413 
3414         switch (off) {
3415         case PCIE_SLOTCTL:
3416                 /*
3417                  * Compare the requested power state with the current
3418                  * PWREN state; run the power-on or -off sequence as needed.
3419                  */
3420                 pwr_state_on = CSR_BR(csr_base, HOTPLUG_CONTROL, PWREN);
3421                 pwr_off = val & PCIE_SLOTCTL_PWR_CONTROL;
3422 
3423                 if (!pwr_off && !pwr_state_on)
3424                         ret = oberon_hp_pwron(csr_base);
3425                 else if (pwr_off && pwr_state_on) {
3426                         pwr_fault = CSR_XR(csr_base, TLU_SLOT_STATUS) &
3427                             (1ull << TLU_SLOT_STATUS_PWFD);
3428 
3429                         if (pwr_fault) {
3430                                 DBG(DBG_HP, NULL, "oberon_hpreg_put: power "
3431                                     "off because of power fault\n");
3432                                 CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
3433                         } else
3434                                 ret = oberon_hp_pwroff(csr_base);
3436                 } else
3437                         CSR_XS(csr_base, TLU_SLOT_CONTROL, val);
3438                 break;
3439         case PCIE_SLOTSTS:
3440                 CSR_XS(csr_base, TLU_SLOT_STATUS, val);
3441                 break;
3442         default:
3443                 DBG(DBG_HP, NULL, "oberon_hpreg_put(): "
3444                     "unsupported offset 0x%lx\n", off);
3445                 ret = (uint_t)DDI_FAILURE;
3446                 break;
3447         }
3448 
3449         return (ret);
3450 }
3451 
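/*
 * Install the virtual hot-plug register ops for pciehpc.  Only the
 * Oberon chip supports this; all others get DDI_ENOTSUP.
 */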
3452 int
3453 hvio_hotplug_init(dev_info_t *dip, void *arg)
3454 {
3455         pcie_hp_regops_t *regops = (pcie_hp_regops_t *)arg;
3456         px_t    *px_p = DIP_TO_STATE(dip);
3457         pxu_t   *pxu_p = (pxu_t *)px_p->px_plat_p;
3458         volatile uint64_t reg;
3459 
3460         if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
3461                 if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3462                     TLU_SLOT_CAPABILITIES, HP)) {
3463                         DBG(DBG_HP, NULL, "%s%d: hotplug capable bit not set\n",
3464                             ddi_driver_name(dip), ddi_get_instance(dip));
3465                         return (DDI_FAILURE);
3466                 }
3467 
3468                 /* For empty or disconnected slot, disable LUP/LDN */
3469                 if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3470                     TLU_SLOT_STATUS, PSD) ||
3471                     !CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3472                     HOTPLUG_CONTROL, PWREN)) {
3473 
3474                         reg = CSR_XR((caddr_t)pxu_p->px_address[PX_REG_CSR],
3475                             TLU_OTHER_EVENT_LOG_ENABLE);
3476                         reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
3477                             (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
3478                             (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
3479                             (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
3480                         CSR_XS((caddr_t)pxu_p->px_address[PX_REG_CSR],
3481                             TLU_OTHER_EVENT_LOG_ENABLE, reg);
3482                 }
3483 
3484                 regops->get = oberon_hpreg_get;
3485                 regops->put = oberon_hpreg_put;
3486 
3487                 /* cookie points at the csr_base */
3488                 regops->cookie = (void *)&pxu_p->px_address[PX_REG_CSR];
3489 
3490                 return (DDI_SUCCESS);
3491         }
3492 
3493         return (DDI_ENOTSUP);
3494 }
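/*
 * Illustrative use of the ops installed above (pciehpc is the real
 * consumer; this fragment is exposition only):
 *
 *	uint_t sts = (*regops->get)(regops->cookie, PCIE_SLOTSTS);
 *	(void) (*regops->put)(regops->cookie, PCIE_SLOTSTS, sts);
 */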
3495 
3496 int
3497 hvio_hotplug_uninit(dev_info_t *dip)
3498 {
3499         px_t    *px_p = DIP_TO_STATE(dip);
3500         pxu_t   *pxu_p = (pxu_t *)px_p->px_plat_p;
3501 
3502         if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
3503                 return (DDI_SUCCESS);
3504 
3505         return (DDI_FAILURE);
3506 }