/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>.  All rights reserved.
 */

/*
 * PX mmu initialization and configuration
 */
#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/ddi_impldefs.h>
#include <sys/vmem.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include "px_obj.h"

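/*
 * px_mmu_attach - set up the MMU state for one px instance: read the
 * "virtual-dma" property to learn the DVMA range, create the DVMA
 * vmem arena and fast track page cache locks, and reserve any DVMA
 * pages whose TSB entries already hold valid mappings (e.g. ones
 * installed before this driver attached).
 * return value: DDI_SUCCESS or DDI_FAILURE
 */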
int
px_mmu_attach(px_t *px_p)
{
        dev_info_t              *dip = px_p->px_dip;
        px_mmu_t                *mmu_p;
        uint32_t                tsb_i = 0;
        char                    map_name[32];
        px_dvma_range_prop_t    *dvma_prop;
        int                     dvma_prop_len;
        uint32_t                cache_size, tsb_entries;

        /*
         * Allocate mmu state structure and link it to the
         * px state structure.
         */
        mmu_p = kmem_zalloc(sizeof (px_mmu_t), KM_SLEEP);

        px_p->px_mmu_p = mmu_p;
        mmu_p->mmu_px_p = px_p;
        mmu_p->mmu_inst = ddi_get_instance(dip);

        /*
         * Check for the "virtual-dma" property that specifies
         * the DVMA range.
         */
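        /*
         * For reference, a sketch of the property payload as consumed
         * below.  The authoritative definition of px_dvma_range_prop_t
         * lives in the px headers; only the two fields used here are
         * shown, and the exact field widths are assumptions:
         *
         *	struct px_dvma_range_prop {
         *		uint32_t	dvma_base;	base of the range
         *		uint32_t	dvma_len;	length in bytes
         *	};
         */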
        if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "virtual-dma", (caddr_t)&dvma_prop, &dvma_prop_len) !=
            DDI_PROP_SUCCESS) {

                DBG(DBG_ATTACH, dip, "Getting virtual-dma failed\n");

                kmem_free(mmu_p, sizeof (px_mmu_t));
                px_p->px_mmu_p = NULL;

                return (DDI_FAILURE);
        }

        mmu_p->mmu_dvma_base = dvma_prop->dvma_base;
        mmu_p->mmu_dvma_end = dvma_prop->dvma_base +
            (dvma_prop->dvma_len - 1);
        tsb_entries = MMU_BTOP(dvma_prop->dvma_len);

        kmem_free(dvma_prop, dvma_prop_len);

        /*
         * Setup base and bounds for DVMA and bypass mappings.
         */
        mmu_p->mmu_dvma_cache_locks =
            kmem_zalloc(px_dvma_page_cache_entries, KM_SLEEP);

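        /*
         * Page-granular bookkeeping: one TSB entry maps one MMU page,
         * and a DVMA page's offset from dvma_base_pg is used as its
         * TSB index throughout this file.  Half of the TSB entries are
         * held back in mmu_dvma_reserve for the DVMA allocation path.
         */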
        mmu_p->dvma_base_pg = MMU_BTOP(mmu_p->mmu_dvma_base);
        mmu_p->mmu_dvma_reserve = tsb_entries >> 1;
        mmu_p->dvma_end_pg = MMU_BTOP(mmu_p->mmu_dvma_end);

        /*
         * Create a virtual memory map for dvma address space.
         * Reserve 'cache_size' bytes of low dvma space for the fast
         * track cache.
         */
        (void) snprintf(map_name, sizeof (map_name), "%s%d_dvma",
            ddi_driver_name(dip), ddi_get_instance(dip));

        cache_size = MMU_PTOB(px_dvma_page_cache_entries *
            px_dvma_page_cache_clustsz);
        mmu_p->mmu_dvma_fast_end = mmu_p->mmu_dvma_base +
            cache_size - 1;

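        /*
         * The vmem arena manages only the DVMA space above the fast
         * track cache; addresses at or below mmu_dvma_fast_end are
         * handed out by the page cache instead.
         */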
        mmu_p->mmu_dvma_map = vmem_create(map_name,
            (void *)(mmu_p->mmu_dvma_fast_end + 1),
            MMU_PTOB(tsb_entries) - cache_size, MMU_PAGE_SIZE,
            NULL, NULL, NULL, MMU_PAGE_SIZE, VM_SLEEP);

        mutex_init(&mmu_p->dvma_debug_lock, NULL, MUTEX_DRIVER, NULL);

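        /*
         * Walk the TSB and reserve the DVMA space behind any entry
         * that already holds a valid mapping (for example, one
         * installed before this driver attached), so the allocators
         * above never hand that space out again.
         */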
        for (tsb_i = 0; tsb_i < tsb_entries; tsb_i++) {
                r_addr_t ra = 0;
                io_attributes_t attr;
                caddr_t va;

                if (px_lib_iommu_getmap(px_p->px_dip, PCI_TSBID(0, tsb_i),
                    &attr, &ra) != DDI_SUCCESS)
                        continue;

                va = (caddr_t)(MMU_PTOB(mmu_p->dvma_base_pg + tsb_i));

                if (va <= (caddr_t)mmu_p->mmu_dvma_fast_end) {
                        uint32_t cache_i;

                        /*
                         * the va is within the *fast* dvma range; therefore,
                         * lock its fast dvma page cache cluster in order to
                         * both preserve the TTE and prevent the use of this
                         * fast dvma page cache cluster by px_dvma_map_fast().
                         * the lock value 0xFF comes from ldstub().
                         */
                        cache_i = tsb_i / px_dvma_page_cache_clustsz;
                        ASSERT(cache_i < px_dvma_page_cache_entries);
                        mmu_p->mmu_dvma_cache_locks[cache_i] = 0xFF;
                } else {
                        (void) vmem_xalloc(mmu_p->mmu_dvma_map, MMU_PAGE_SIZE,
                            MMU_PAGE_SIZE, 0, 0, va, va + MMU_PAGE_SIZE,
                            VM_NOSLEEP | VM_BESTFIT | VM_PANIC);
                }
        }

        return (DDI_SUCCESS);
}

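/*
 * px_mmu_detach - tear down everything px_mmu_attach() set up.
 */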
void
px_mmu_detach(px_t *px_p)
{
        px_mmu_t *mmu_p = px_p->px_mmu_p;

        (void) px_lib_iommu_detach(px_p);

        /*
         * Free the dvma resource map.
         */
        vmem_destroy(mmu_p->mmu_dvma_map);

        kmem_free(mmu_p->mmu_dvma_cache_locks,
            px_dvma_page_cache_entries);

        if (PX_DVMA_DBG_ON(mmu_p))
                px_dvma_debug_fini(mmu_p);

        mutex_destroy(&mmu_p->dvma_debug_lock);

        /*
         * Free the mmu state structure.
         */
        kmem_free(mmu_p, sizeof (px_mmu_t));
        px_p->px_mmu_p = NULL;
}

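/*
 * px_mmu_map_pages - program the TSB entries for 'npages' DVMA pages
 * starting at 'dvma_pg', using page frames taken from the handle's pfn
 * list starting at 'pfn_index'.  When the handle calls for a buffer
 * zone, the last pfn is also mapped at the following DVMA page as a
 * redzone.
 * return value: DDI_SUCCESS or DDI_FAILURE
 */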
int
px_mmu_map_pages(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_dvma_addr_t dvma_pg,
    size_t npages, size_t pfn_index)
{
        dev_info_t      *dip = mmu_p->mmu_px_p->px_dip;
        px_dvma_addr_t  pg_index = MMU_PAGE_INDEX(mmu_p, dvma_pg);
        io_attributes_t attr = PX_GET_MP_TTE(mp->dmai_tte);

        ASSERT(npages <= mp->dmai_ndvmapages);
        DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages:%x+%x=%x "
            "npages=0x%x pfn_index=0x%x\n", (uint_t)mmu_p->dvma_base_pg,
            (uint_t)pg_index, dvma_pg, (uint_t)npages, (uint_t)pfn_index);

        if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index), npages,
            PX_ADD_ATTR_EXTNS(attr, mp->dmai_bdf), (void *)mp, pfn_index,
            MMU_MAP_PFN) != DDI_SUCCESS) {
                DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: "
                    "px_lib_iommu_map failed\n");

                return (DDI_FAILURE);
        }

        if (!PX_MAP_BUFZONE(mp))
                goto done;

        DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: redzone pg=%x\n",
            pg_index + npages);

        ASSERT(PX_HAS_REDZONE(mp));

        if (px_lib_iommu_map(dip, PCI_TSBID(0, pg_index + npages), 1,
            PX_ADD_ATTR_EXTNS(attr, mp->dmai_bdf), (void *)mp,
            pfn_index + npages - 1, MMU_MAP_PFN) != DDI_SUCCESS) {
                DBG(DBG_MAP_WIN, dip, "px_mmu_map_pages: mapping "
                    "REDZONE page failed\n");

                if (px_lib_iommu_demap(dip, PCI_TSBID(0, pg_index), npages)
                    != DDI_SUCCESS) {
                        DBG(DBG_MAP_WIN, dip, "px_lib_iommu_demap: failed\n");
                }
                return (DDI_FAILURE);
        }

done:
        if (PX_DVMA_DBG_ON(mmu_p))
                px_dvma_alloc_debug(mmu_p, (char *)mp->dmai_mapping,
                    mp->dmai_size, mp);

        return (DDI_SUCCESS);
}

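/*
 * px_mmu_unmap_pages - undo px_mmu_map_pages(): demap 'npages' DVMA
 * pages starting at 'dvma_pg', plus the trailing redzone page when one
 * was mapped.
 * return value: none
 */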
void
px_mmu_unmap_pages(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_dvma_addr_t dvma_pg,
    uint_t npages)
{
        px_dvma_addr_t  pg_index = MMU_PAGE_INDEX(mmu_p, dvma_pg);

        DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip,
            "px_mmu_unmap_pages:%x+%x=%x npages=0x%x\n",
            (uint_t)mmu_p->dvma_base_pg, (uint_t)pg_index, dvma_pg,
            (uint_t)npages);

        if (px_lib_iommu_demap(mmu_p->mmu_px_p->px_dip,
            PCI_TSBID(0, pg_index), npages) != DDI_SUCCESS) {
                DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip,
                    "px_lib_iommu_demap: failed\n");
        }

        if (!PX_MAP_BUFZONE(mp))
                return;

        DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip, "px_mmu_unmap_pages: "
            "redzone pg=%x\n", pg_index + npages);

        ASSERT(PX_HAS_REDZONE(mp));

        if (px_lib_iommu_demap(mmu_p->mmu_px_p->px_dip,
            PCI_TSBID(0, pg_index + npages), 1) != DDI_SUCCESS) {
                DBG(DBG_UNMAP_WIN, mmu_p->mmu_px_p->px_dip,
                    "px_lib_iommu_demap: failed\n");
        }
}

/*
 * px_mmu_map_window - map a dvma window into the mmu
 * used by: px_dma_win()
 * return value: DDI_SUCCESS or DDI_FAILURE
 */
/*ARGSUSED*/
int
px_mmu_map_window(px_mmu_t *mmu_p, ddi_dma_impl_t *mp, px_window_t win_no)
{
        uint32_t obj_pg0_off = mp->dmai_roffset;
        uint32_t win_pg0_off = win_no ? 0 : obj_pg0_off;
        size_t win_size = mp->dmai_winsize;
        size_t pfn_index = win_size * win_no;                   /* temp value */
        size_t obj_off = win_no ? pfn_index - obj_pg0_off : 0;  /* xferred sz */
        px_dvma_addr_t dvma_pg = MMU_BTOP(mp->dmai_mapping);
        size_t res_size = mp->dmai_object.dmao_size - obj_off + win_pg0_off;
        int ret = DDI_SUCCESS;
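
        /*
         * Window n > 0 starts at object offset n * winsize - roffset,
         * since window 0 absorbs the object's offset into its first
         * page.  For example (illustrative numbers only), with
         * roffset = 0x200 and winsize = 0x8000, window 0 transfers
         * object bytes [0, 0x7e00) and window 1 starts at 0x7e00.
         */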
        ASSERT(!(win_size & MMU_PAGE_OFFSET));
        if (win_no >= mp->dmai_nwin)
                return (ret);
        if (res_size < win_size)                /* last window */
                win_size = res_size;            /* mp->dmai_winsize unchanged */

        mp->dmai_mapping = MMU_PTOB(dvma_pg) | win_pg0_off;
        mp->dmai_size = win_size - win_pg0_off; /* cur win xferrable size */
        mp->dmai_offset = obj_off;              /* win offset into object */
        pfn_index = MMU_BTOP(pfn_index);        /* index into pfnlist */
        ret = px_mmu_map_pages(mmu_p, mp, dvma_pg, MMU_BTOPR(win_size),
            pfn_index);

        return (ret);
}

/*
 * px_mmu_unmap_window
 * This routine is called to break down the mmu mappings to a dvma window.
 * Non-partial mappings are viewed as a single-window mapping.
 * used by: px_dma_unbindhdl(), px_dma_window(),
 *	and px_dma_ctlops() - DDI_DMA_FREE, DDI_DMA_MOVWIN, DDI_DMA_NEXTWIN
 * return value: none
 */
/*ARGSUSED*/
void
px_mmu_unmap_window(px_mmu_t *mmu_p, ddi_dma_impl_t *mp)
{
        px_dvma_addr_t dvma_pg = MMU_BTOP(mp->dmai_mapping);
        uint_t npages = MMU_BTOP(mp->dmai_winsize);

        px_mmu_unmap_pages(mmu_p, mp, dvma_pg, npages);

        if (PX_DVMA_DBG_ON(mmu_p))
                px_dvma_free_debug(mmu_p, (char *)mp->dmai_mapping,
                    mp->dmai_size, mp);
}


#if 0
/*
 * The following table is for reference only. It lists the encodings,
 * held in bits 3:0 of the MMU TSB CTRL REG, for each TSB size
 * (measured in the number of 8-byte entries).
 */
static int px_mmu_tsb_sizes[] = {
        0x0,            /* 1K */
        0x1,            /* 2K */
        0x2,            /* 4K */
        0x3,            /* 8K */
        0x4,            /* 16K */
        0x5,            /* 32K */
        0x6,            /* 64K */
        0x7,            /* 128K */
        0x8             /* 256K */
};
#endif

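/*
 * Textual names for the MMU error types, intended, presumably, for use
 * by px_log_mmu_err() below once real error logging is filled in.
 */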
static char *px_mmu_errsts[] = {
        "Protection Error", "Invalid Error", "Timeout", "ECC Error(UE)"
};

/*ARGSUSED*/
static int
px_log_mmu_err(px_t *px_p)
{
        /*
         * Placeholder; the correct error bits still need to be logged.
         */
        return (0);
}