/* BEGIN CSTYLED */

/*
 * Copyright (c) 2009, Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/** @file i915_gem_tiling.c
 *
 * Support for managing tiling state of buffer objects.
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * The performance improvement from doing this on the back/depth buffer is
 * on the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMs) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled.  However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y.  So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics.  This
 * is called "Channel XOR Randomization" in the MCH documentation.  The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what the address
 * swizzling it needs to do is, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bits 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bits 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
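
/*
 * To make the swizzle modes above concrete: the cumulative CPU-side fixup
 * for I915_BIT_6_SWIZZLE_9_10, for instance, can be sketched as below.
 * This is illustrative only; swizzle_addr is a hypothetical helper that
 * restates the description above, not code used by this driver:
 *
 *	static uint64_t
 *	swizzle_addr(uint64_t addr)
 *	{
 *		uint64_t bit6 = ((addr >> 9) ^ (addr >> 10)) & 1;
 *
 *		return (addr ^ (bit6 << 6));
 *	}
 *
 * That is, bit 6 of the address is flipped whenever bits 9 and 10
 * disagree, so CPU reads and writes land where the GPU's tiled walk
 * expects them.
 */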

/**
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
        uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

        if (!IS_I9XX(dev)) {
                /* As far as we know, the 865 doesn't have these bit 6
                 * swizzling issues.
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
        } else if (IS_MOBILE(dev)) {
                uint32_t dcc;

                /* On mobile 9xx chipsets, channel interleave by the CPU is
                 * determined by DCC.  For single-channel, neither the CPU
                 * nor the GPU do swizzling.  For dual channel interleaved,
                 * the GPU's interleave is bits 9 and 10 for X tiled, and bit
                 * 9 for Y tiled.  The CPU's interleave is independent, and
                 * can be based on either bit 11 (haven't seen this yet) or
                 * bit 17 (common).
                 */

                dcc = I915_READ(DCC);
                switch (dcc & DCC_ADDRESSING_MODE_MASK) {
                case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
                case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
                        swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                        swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                        break;
                case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
                        if (dcc & DCC_CHANNEL_XOR_DISABLE) {
                                /* This is the base swizzling by the GPU for
                                 * tiled buffers.
                                 */
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                                swizzle_y = I915_BIT_6_SWIZZLE_9;
                        } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
                                /* Bit 11 swizzling by the CPU in addition. */
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
                                swizzle_y = I915_BIT_6_SWIZZLE_9_11;
                        } else {
                                /* Bit 17 swizzling by the CPU in addition. */
                                swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
                                swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
                        }
                        break;
                }
                if (dcc == 0xffffffff) {
                        DRM_ERROR("Couldn't read from MCHBAR.  "
                                  "Disabling tiling.\n");
                        swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
                        swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
                }
        } else {
                /* The 965, G33, and newer, have a very flexible memory
                 * configuration.  It will enable dual-channel mode
                 * (interleaving) on as much memory as it can, and the GPU
                 * will additionally sometimes enable different bit 6
                 * swizzling for tiled objects from the CPU.
                 *
                 * Here's what I found on the G965 (sizes in MB; "X" in the
                 * last column means swizzling was required, "O" means not):
                 *    slot fill         memory size  swizzling
                 * 0A   0B   1A   1B    1-ch   2-ch
                 * 512  0    0    0     512    0     O
                 * 512  0    512  0     16     1008  X
                 * 512  0    0    512   16     1008  X
                 * 0    512  0    512   16     1008  X
                 * 1024 1024 1024 0     2048   1024  O
                 *
                 * We could probably detect this based on either the DRB
                 * matching, which was the case for the swizzling required in
                 * the table above, or from the 1-ch value being less than
                 * the minimum size of a rank.
                 */
                if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
                        swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                        swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                } else {
                        swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                        swizzle_y = I915_BIT_6_SWIZZLE_9;
                }
        }

        /* FIXME: check with memory config on IGDNG */
        if (IS_IGDNG(dev)) {
                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                swizzle_y = I915_BIT_6_SWIZZLE_9;
        }

        dev_priv->mm.bit_6_swizzle_x = swizzle_x;
        dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}

/**
 * Returns the size of the fence for a tiled object of the given size.
 */
static int
i915_get_fence_size(struct drm_device *dev, int size)
{
        int i;
        int start;

        if (IS_I965G(dev)) {
                /* The 965 can have fences at any page boundary. */
                return (size + PAGE_SIZE-1) & ~(PAGE_SIZE-1);
        } else {
                /* Round the size up to a power of two, no smaller than the
                 * minimum fence size.
                 */
                if (IS_I9XX(dev))
                        start = 1024 * 1024;
                else
                        start = 512 * 1024;

                for (i = start; i < size; i <<= 1)
                        ;

                return i;
        }
}
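
/*
 * A worked example of the rounding above (illustrative, not driver
 * logic): on 9xx hardware the minimum fence is 1MB, so a 768KB object
 * gets a 1MB fence, while a 1.5MB object rounds up to the next power of
 * two, 2MB.
 */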

/* Check pitch constraints for all chips & tiling formats */
static int
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
        int tile_width;

        /* Linear is always fine */
        if (tiling_mode == I915_TILING_NONE)
                return 1;

        if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
                tile_width = 128;
        else
                tile_width = 512;

        if (stride == 0)
                return 0;

        /* 965+ just needs multiples of tile width */
        if (IS_I965G(dev)) {
                if (stride & (tile_width - 1))
                        return 0;
                return 1;
        }

        /* Pre-965 needs power-of-two strides */
        if (stride < tile_width)
                return 0;

        if (stride & (stride - 1))
                return 0;

        /* We don't handle the aperture area covered by the fence being bigger
         * than the object size.
         */
        if (i915_get_fence_size(dev, size) != size)
                return 0;

        return 1;
}
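
/*
 * Concretely (a sketch of the rules above, not driver logic): with the
 * 512-byte X tile width, a stride of 5120 bytes passes on 965+ (it is a
 * multiple of 512) but fails on pre-965 (not a power of two), while a
 * stride of 4096 bytes satisfies both.
 */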

/**
 * Sets the tiling mode of an object, returning the required swizzling of
 * bit 6 of addresses in the object.
 */
/*ARGSUSED*/
int
i915_gem_set_tiling(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        struct drm_i915_gem_set_tiling args;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        if (dev->driver->use_gem != 1)
                return ENODEV;

        DRM_COPYFROM_WITH_RETURN(&args,
            (struct drm_i915_gem_set_tiling __user *) data, sizeof(args));

        obj = drm_gem_object_lookup(fpriv, args.handle);
        if (obj == NULL)
                return EINVAL;
        obj_priv = obj->driver_private;

        if (!i915_tiling_ok(dev, args.stride, obj->size, args.tiling_mode)) {
                drm_gem_object_unreference(obj);
                DRM_DEBUG("i915_gem_set_tiling: invalid stride or tiling mode");
                return EINVAL;
        }

        spin_lock(&dev->struct_mutex);

        if (args.tiling_mode == I915_TILING_NONE) {
                args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
        } else {
                if (args.tiling_mode == I915_TILING_X)
                        args.swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
                else
                        args.swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
                /* If we can't handle the swizzling, make it untiled. */
                if (args.swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
                        args.tiling_mode = I915_TILING_NONE;
                        args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
                }
        }

        if (args.tiling_mode != obj_priv->tiling_mode) {
                /* Unbind the object, as switching tiling means we're
                 * switching the cache organization due to fencing, probably.
                 */
                ret = i915_gem_object_unbind(obj, 1);
                if (ret != 0) {
                        args.tiling_mode = obj_priv->tiling_mode;
                        spin_unlock(&dev->struct_mutex);
                        drm_gem_object_unreference(obj);
                        DRM_ERROR("i915_gem_set_tiling: unbind failed, "
                            "error %d", ret);
                        return ret;
                }
                obj_priv->tiling_mode = args.tiling_mode;
        }
        obj_priv->stride = args.stride;

        ret = DRM_COPY_TO_USER((struct drm_i915_gem_set_tiling __user *) data,
            &args, sizeof(args));
        if (ret != 0)
                DRM_ERROR("i915_gem_set_tiling: copyout failed, error %d", ret);

        drm_gem_object_unreference(obj);
        spin_unlock(&dev->struct_mutex);

        return 0;
}
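
/*
 * Userspace typically reaches this ioctl through libdrm; a minimal
 * sketch follows (fd, handle and stride are assumed to be supplied by
 * the caller):
 *
 *	struct drm_i915_gem_set_tiling st = { 0 };
 *
 *	st.handle = handle;
 *	st.tiling_mode = I915_TILING_X;
 *	st.stride = stride;
 *	(void) ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
 *
 * On return, st.swizzle_mode tells the 3D driver how CPU access to the
 * object's pages must be swizzled to match the GPU, as described at the
 * top of this file.
 */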

/**
 * Returns the current tiling mode and required bit 6 swizzling for the object.
 */
/*ARGSUSED*/
int
i915_gem_get_tiling(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        struct drm_i915_gem_get_tiling args;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        if (dev->driver->use_gem != 1)
                return ENODEV;

        DRM_COPYFROM_WITH_RETURN(&args,
            (struct drm_i915_gem_get_tiling __user *) data, sizeof(args));

        obj = drm_gem_object_lookup(fpriv, args.handle);
        if (obj == NULL)
                return EINVAL;
        obj_priv = obj->driver_private;

        spin_lock(&dev->struct_mutex);

        args.tiling_mode = obj_priv->tiling_mode;
        switch (obj_priv->tiling_mode) {
        case I915_TILING_X:
                args.swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
                break;
        case I915_TILING_Y:
                args.swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
                break;
        case I915_TILING_NONE:
                args.swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
                break;
        default:
                DRM_ERROR("unknown tiling mode %d\n", obj_priv->tiling_mode);
        }

        ret = DRM_COPY_TO_USER((struct drm_i915_gem_get_tiling __user *) data,
            &args, sizeof(args));
        if (ret != 0)
                DRM_ERROR("i915_gem_get_tiling: copyout failed, error %d", ret);

        drm_gem_object_unreference(obj);
        spin_unlock(&dev->struct_mutex);

        return 0;
}