git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge remote branch 'airlied/drm-next' into drm-intel-next
author Eric Anholt <eric@anholt.net>
Thu, 5 Nov 2009 23:04:06 +0000 (15:04 -0800)
committer Eric Anholt <eric@anholt.net>
Thu, 5 Nov 2009 23:04:06 +0000 (15:04 -0800)
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/r600d.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_ttm.c
include/drm/drmP.h

index 0a6f0b3bdc787eae8c29f54086dcb089f586b5c9,d9af7964f81cff986d74e1a41efce77937dbd745..72754aca7abfc8ef22db1482cd5e58699f546d96
@@@ -37,7 -37,6 +37,7 @@@
  
  #include <linux/interrupt.h>  /* For task queue support */
  
 +#include <linux/vgaarb.h>
  /**
   * Get interrupt from bus id.
   *
@@@ -172,26 -171,6 +172,26 @@@ err
  }
  EXPORT_SYMBOL(drm_vblank_init);
  
 +static void drm_irq_vgaarb_nokms(void *cookie, bool state)
 +{
 +      struct drm_device *dev = cookie;
 +
 +      if (dev->driver->vgaarb_irq) {
 +              dev->driver->vgaarb_irq(dev, state);
 +              return;
 +      }
 +
 +      if (!dev->irq_enabled)
 +              return;
 +
 +      if (state)
 +              dev->driver->irq_uninstall(dev);
 +      else {
 +              dev->driver->irq_preinstall(dev);
 +              dev->driver->irq_postinstall(dev);
 +      }
 +}
 +
  /**
   * Install IRQ handler.
   *
@@@ -252,9 -231,6 +252,9 @@@ int drm_irq_install(struct drm_device *
                return ret;
        }
  
 +      if (!drm_core_check_feature(dev, DRIVER_MODESET))
 +              vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
 +
        /* After installing handler */
        ret = dev->driver->irq_postinstall(dev);
        if (ret < 0) {
@@@ -303,9 -279,6 +303,9 @@@ int drm_irq_uninstall(struct drm_devic
  
        DRM_DEBUG("irq=%d\n", dev->pdev->irq);
  
 +      if (!drm_core_check_feature(dev, DRIVER_MODESET))
 +              vga_client_register(dev->pdev, NULL, NULL, NULL);
 +
        dev->driver->irq_uninstall(dev);
  
        free_irq(dev->pdev->irq, dev);
@@@ -550,6 -523,62 +550,62 @@@ out
        return ret;
  }
  
+ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+                                 union drm_wait_vblank *vblwait,
+                                 struct drm_file *file_priv)
+ {
+       struct drm_pending_vblank_event *e;
+       struct timeval now;
+       unsigned long flags;
+       unsigned int seq;
+       e = kzalloc(sizeof *e, GFP_KERNEL);
+       if (e == NULL)
+               return -ENOMEM;
+       e->pipe = pipe;
+       e->event.base.type = DRM_EVENT_VBLANK;
+       e->event.base.length = sizeof e->event;
+       e->event.user_data = vblwait->request.signal;
+       e->base.event = &e->event.base;
+       e->base.file_priv = file_priv;
+       e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+       do_gettimeofday(&now);
+       spin_lock_irqsave(&dev->event_lock, flags);
+       if (file_priv->event_space < sizeof e->event) {
+               spin_unlock_irqrestore(&dev->event_lock, flags);
+               kfree(e);
+               return -ENOMEM;
+       }
+       file_priv->event_space -= sizeof e->event;
+       seq = drm_vblank_count(dev, pipe);
+       if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+           (seq - vblwait->request.sequence) <= (1 << 23)) {
+               vblwait->request.sequence = seq + 1;
+       }
+       DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+                 vblwait->request.sequence, seq, pipe);
+       e->event.sequence = vblwait->request.sequence;
+       if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+               e->event.tv_sec = now.tv_sec;
+               e->event.tv_usec = now.tv_usec;
+               drm_vblank_put(dev, e->pipe);
+               list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+               wake_up_interruptible(&e->base.file_priv->event_wait);
+       } else {
+               list_add_tail(&e->base.link, &dev->vblank_event_list);
+       }
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+       return 0;
+ }
  /**
   * Wait for VBLANK.
   *
@@@ -609,6 -638,9 +665,9 @@@ int drm_wait_vblank(struct drm_device *
                goto done;
        }
  
+       if (flags & _DRM_VBLANK_EVENT)
+               return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
        if ((flags & _DRM_VBLANK_NEXTONMISS) &&
            (seq - vblwait->request.sequence) <= (1<<23)) {
                vblwait->request.sequence = seq + 1;
@@@ -641,6 -673,38 +700,38 @@@ done
        return ret;
  }
  
+ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+ {
+       struct drm_pending_vblank_event *e, *t;
+       struct timeval now;
+       unsigned long flags;
+       unsigned int seq;
+       do_gettimeofday(&now);
+       seq = drm_vblank_count(dev, crtc);
+       spin_lock_irqsave(&dev->event_lock, flags);
+       list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+               if (e->pipe != crtc)
+                       continue;
+               if ((seq - e->event.sequence) > (1<<23))
+                       continue;
+               DRM_DEBUG("vblank event on %d, current %d\n",
+                         e->event.sequence, seq);
+               e->event.sequence = seq;
+               e->event.tv_sec = now.tv_sec;
+               e->event.tv_usec = now.tv_usec;
+               drm_vblank_put(dev, e->pipe);
+               list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+               wake_up_interruptible(&e->base.file_priv->event_wait);
+       }
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
  /**
   * drm_handle_vblank - handle a vblank event
   * @dev: DRM device
   */
  void drm_handle_vblank(struct drm_device *dev, int crtc)
  {
+       if (!dev->num_crtcs)
+               return;
        atomic_inc(&dev->_vblank_count[crtc]);
        DRM_WAKEUP(&dev->vbl_queue[crtc]);
+       drm_handle_vblank_events(dev, crtc);
  }
  EXPORT_SYMBOL(drm_handle_vblank);
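
The _DRM_VBLANK_EVENT path above pairs with the new drm_read() file operation (declared in drmP.h and wired into the i915 fops below): instead of blocking in the ioctl, drm_queue_vblank_event() queues a drm_pending_vblank_event that drm_handle_vblank_events() later completes for userspace to read back. A minimal userspace sketch of that flow, assuming the matching drm.h UAPI from this series (struct drm_event_vblank, DRM_EVENT_VBLANK and _DRM_VBLANK_EVENT are referenced by the kernel code here but defined outside this diff) and an already-open DRM fd; the helper names are illustrative:

#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/drm.h>		/* assumes the updated UAPI header is installed */

/* Ask for an event one vblank from now instead of blocking in the ioctl. */
static int request_vblank_event(int fd, unsigned long user_data)
{
	union drm_wait_vblank vbl;

	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_EVENT;
	vbl.request.sequence = 1;
	vbl.request.signal = user_data;	/* comes back as drm_event_vblank.user_data */

	return ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
}

/* Block until the DRM fd becomes readable, then decode the queued events. */
static int read_vblank_events(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	char buf[1024];
	ssize_t len;
	size_t i = 0;

	if (poll(&pfd, 1, -1) <= 0)
		return -1;

	len = read(fd, buf, sizeof(buf));	/* served by the new drm_read() */
	if (len <= 0)
		return -1;

	while (i + sizeof(struct drm_event) <= (size_t)len) {
		struct drm_event *e = (struct drm_event *)&buf[i];

		if (e->type == DRM_EVENT_VBLANK) {
			struct drm_event_vblank *vb = (struct drm_event_vblank *)e;

			printf("vblank seq %u at %u.%06u\n",
			       vb->sequence, vb->tv_sec, vb->tv_usec);
		}
		i += e->length;
	}
	return 0;
}

Note that the (seq - vblwait->request.sequence) <= (1 << 23) checks in the kernel code treat the 32-bit counter difference as a signed distance: a requested sequence that has already passed (within roughly eight million vblanks) is completed immediately with the current timestamp instead of being queued.
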
index 7f436ec075f6221df8ee6beafebae603054be003,b81305e33c792984c5e14089946ee3da035b4313..2fa217862058d849fe3b5723cf578297777cbe53
@@@ -89,9 -89,6 +89,9 @@@ static int i915_suspend(struct drm_devi
                pci_set_power_state(dev->pdev, PCI_D3hot);
        }
  
 +      /* Modeset on resume, not lid events */
 +      dev_priv->modeset_on_lid = 0;
 +
        return 0;
  }
  
@@@ -100,6 -97,8 +100,6 @@@ static int i915_resume(struct drm_devic
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = 0;
  
 -      pci_set_power_state(dev->pdev, PCI_D0);
 -      pci_restore_state(dev->pdev);
        if (pci_enable_device(dev->pdev))
                return -1;
        pci_set_master(dev->pdev);
                drm_helper_resume_force_mode(dev);
        }
  
 +      dev_priv->modeset_on_lid = 0;
 +
        return ret;
  }
  
 +/**
 + * i965_reset - reset chip after a hang
 + * @dev: drm device to reset
 + * @flags: reset domains
 + *
 + * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 + * reset or otherwise an error code.
 + *
 + * Procedure is fairly simple:
 + *   - reset the chip using the reset reg
 + *   - re-init context state
 + *   - re-init hardware status page
 + *   - re-init ring buffer
 + *   - re-init interrupt state
 + *   - re-init display
 + */
 +int i965_reset(struct drm_device *dev, u8 flags)
 +{
 +      drm_i915_private_t *dev_priv = dev->dev_private;
 +      unsigned long timeout;
 +      u8 gdrst;
 +      /*
 +       * We really should only reset the display subsystem if we actually
 +       * need to
 +       */
 +      bool need_display = true;
 +
 +      mutex_lock(&dev->struct_mutex);
 +
 +      /*
 +       * Clear request list
 +       */
 +      i915_gem_retire_requests(dev);
 +
 +      if (need_display)
 +              i915_save_display(dev);
 +
 +      if (IS_I965G(dev) || IS_G4X(dev)) {
 +              /*
 +               * Set the domains we want to reset, then the reset bit (bit 0).
 +               * Clear the reset bit after a while and wait for hardware status
 +               * bit (bit 1) to be set
 +               */
 +              pci_read_config_byte(dev->pdev, GDRST, &gdrst);
 +              pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
 +              udelay(50);
 +              pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
 +
 +              /* ...we don't want to loop forever though, 500ms should be plenty */
 +              timeout = jiffies + msecs_to_jiffies(500);
 +              do {
 +                      udelay(100);
 +                      pci_read_config_byte(dev->pdev, GDRST, &gdrst);
 +              } while ((gdrst & 0x1) && time_after(timeout, jiffies));
 +
 +              if (gdrst & 0x1) {
 +                      WARN(true, "i915: Failed to reset chip\n");
 +                      mutex_unlock(&dev->struct_mutex);
 +                      return -EIO;
 +              }
 +      } else {
 +              DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
 +              mutex_unlock(&dev->struct_mutex);
 +              return -ENODEV;
 +      }
 +
 +      /* Ok, now get things going again... */
 +
 +      /*
 +       * Everything depends on having the GTT running, so we need to start
 +       * there.  Fortunately we don't need to do this unless we reset the
 +       * chip at a PCI level.
 +       *
 +       * Next we need to restore the context, but we don't use those
 +       * yet either...
 +       *
 +       * Ring buffer needs to be re-initialized in the KMS case, or if X
 +       * was running at the time of the reset (i.e. we weren't VT
 +       * switched away).
 +       */
 +      if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 +          !dev_priv->mm.suspended) {
 +              drm_i915_ring_buffer_t *ring = &dev_priv->ring;
 +              struct drm_gem_object *obj = ring->ring_obj;
 +              struct drm_i915_gem_object *obj_priv = obj->driver_private;
 +              dev_priv->mm.suspended = 0;
 +
 +              /* Stop the ring if it's running. */
 +              I915_WRITE(PRB0_CTL, 0);
 +              I915_WRITE(PRB0_TAIL, 0);
 +              I915_WRITE(PRB0_HEAD, 0);
 +
 +              /* Initialize the ring. */
 +              I915_WRITE(PRB0_START, obj_priv->gtt_offset);
 +              I915_WRITE(PRB0_CTL,
 +                         ((obj->size - 4096) & RING_NR_PAGES) |
 +                         RING_NO_REPORT |
 +                         RING_VALID);
 +              if (!drm_core_check_feature(dev, DRIVER_MODESET))
 +                      i915_kernel_lost_context(dev);
 +              else {
 +                      ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 +                      ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
 +                      ring->space = ring->head - (ring->tail + 8);
 +                      if (ring->space < 0)
 +                              ring->space += ring->Size;
 +              }
 +
 +              mutex_unlock(&dev->struct_mutex);
 +              drm_irq_uninstall(dev);
 +              drm_irq_install(dev);
 +              mutex_lock(&dev->struct_mutex);
 +      }
 +
 +      /*
 +       * Display needs restore too...
 +       */
 +      if (need_display)
 +              i915_restore_display(dev);
 +
 +      mutex_unlock(&dev->struct_mutex);
 +      return 0;
 +}
 +
 +
  static int __devinit
  i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
@@@ -333,6 -206,7 +333,7 @@@ static struct drm_driver driver = 
                 .mmap = drm_gem_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
+                .read = drm_read,
  #ifdef CONFIG_COMPAT
                 .compat_ioctl = i915_compat_ioctl,
  #endif
@@@ -361,8 -235,6 +362,8 @@@ static int __init i915_init(void
  {
        driver.num_ioctls = i915_max_ioctl;
  
 +      i915_gem_shrinker_init();
 +
        /*
         * If CONFIG_DRM_I915_KMS is set, default to KMS unless
         * explicitly disabled with the module parameter.
  
  static void __exit i915_exit(void)
  {
 +      i915_gem_shrinker_exit();
        drm_exit(&driver);
  }
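
The i965_reset() entry point added above is not invoked anywhere in this diff; it is intended to be driven by the driver's hang handling. A minimal sketch of such a caller, assuming the render-domain GDRST_RENDER flag defined alongside GDRST_FULL in i915_drv.h (only GDRST_FULL is visible in this diff) and using a placeholder function name rather than an existing one:

/* Sketch only: call the new reset path once the driver decides the GPU is
 * hung.  i915_handle_hang is a placeholder name, and GDRST_RENDER is assumed
 * to be the render-domain reset flag defined next to GDRST_FULL. */
static void i915_handle_hang(struct drm_device *dev)
{
	if (!IS_I965G(dev) && !IS_G4X(dev))
		return;		/* i965_reset() only knows these chips */

	if (i965_reset(dev, GDRST_RENDER))
		DRM_ERROR("GPU hang: chip reset failed\n");
}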
  
index 161094c07d9443957b8f193386e28db258244620,5e821a313a8ce81e7dd28c83a885255bb46bda79..c9e93eabcf16a7c877b9064e7fa86c2f4010cb2b
@@@ -186,7 -186,7 +186,7 @@@ static inline uint32_t r100_irq_ack(str
  
  int r100_irq_process(struct radeon_device *rdev)
  {
-       uint32_t status;
+       uint32_t status, msi_rearm;
  
        status = r100_irq_ack(rdev);
        if (!status) {
                }
                status = r100_irq_ack(rdev);
        }
+       if (rdev->msi_enabled) {
+               switch (rdev->family) {
+               case CHIP_RS400:
+               case CHIP_RS480:
+                       msi_rearm = RREG32(RADEON_AIC_CNTL) & ~RS400_MSI_REARM;
+                       WREG32(RADEON_AIC_CNTL, msi_rearm);
+                       WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
+                       break;
+               default:
+                       msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
+                       WREG32(RADEON_MSI_REARM_EN, msi_rearm);
+                       WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
+                       break;
+               }
+       }
        return IRQ_HANDLED;
  }
  
@@@ -240,7 -255,7 +255,7 @@@ int r100_wb_init(struct radeon_device *
        int r;
  
        if (rdev->wb.wb_obj == NULL) {
-               r = radeon_object_create(rdev, NULL, 4096,
+               r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
                                         true,
                                         RADEON_GEM_DOMAIN_GTT,
                                         false, &rdev->wb.wb_obj);
@@@ -563,19 -578,19 +578,19 @@@ int r100_cp_init(struct radeon_device *
        indirect1_start = 16;
        /* cp setup */
        WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
-       WREG32(RADEON_CP_RB_CNTL,
- #ifdef __BIG_ENDIAN
-              RADEON_BUF_SWAP_32BIT |
- #endif
-              REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
+       tmp = (REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
               REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
               REG_SET(RADEON_MAX_FETCH, max_fetch) |
               RADEON_RB_NO_UPDATE);
+ #ifdef __BIG_ENDIAN
+       tmp |= RADEON_BUF_SWAP_32BIT;
+ #endif
+       WREG32(RADEON_CP_RB_CNTL, tmp);
        /* Set ring address */
        DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
        WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
        /* Force read & write ptr to 0 */
-       tmp = RREG32(RADEON_CP_RB_CNTL);
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
        WREG32(RADEON_CP_RB_WPTR, 0);
@@@ -1763,20 -1778,6 +1778,20 @@@ void r100_vram_init_sizes(struct radeon
                rdev->mc.real_vram_size = rdev->mc.aper_size;
  }
  
 +void r100_vga_set_state(struct radeon_device *rdev, bool state)
 +{
 +      uint32_t temp;
 +
 +      temp = RREG32(RADEON_CONFIG_CNTL);
 +      if (state == false) {
 +              temp &= ~(1<<8);
 +              temp |= (1<<9);
 +      } else {
 +              temp &= ~(1<<9);
 +      }
 +      WREG32(RADEON_CONFIG_CNTL, temp);
 +}
 +
  void r100_vram_info(struct radeon_device *rdev)
  {
        r100_vram_get_type(rdev);
@@@ -2364,7 -2365,7 +2379,7 @@@ void r100_bandwidth_update(struct radeo
        /*
          Find the total latency for the display data.
        */
-       disp_latency_overhead.full = rfixed_const(80);
+       disp_latency_overhead.full = rfixed_const(8);
        disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
        mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
        mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
  static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
  {
        DRM_ERROR("pitch                      %d\n", t->pitch);
+       DRM_ERROR("use_pitch                  %d\n", t->use_pitch);
        DRM_ERROR("width                      %d\n", t->width);
+       DRM_ERROR("width_11                   %d\n", t->width_11);
        DRM_ERROR("height                     %d\n", t->height);
+       DRM_ERROR("height_11                  %d\n", t->height_11);
        DRM_ERROR("num levels                 %d\n", t->num_levels);
        DRM_ERROR("depth                      %d\n", t->txdepth);
        DRM_ERROR("bpp                        %d\n", t->cpp);
@@@ -2623,15 -2627,17 +2641,17 @@@ static int r100_cs_track_texture_check(
                                else
                                        w = track->textures[u].pitch / (1 << i);
                        } else {
-                               w = track->textures[u].width / (1 << i);
+                               w = track->textures[u].width;
                                if (rdev->family >= CHIP_RV515)
                                        w |= track->textures[u].width_11;
+                               w = w / (1 << i);
                                if (track->textures[u].roundup_w)
                                        w = roundup_pow_of_two(w);
                        }
-                       h = track->textures[u].height / (1 << i);
+                       h = track->textures[u].height;
                        if (rdev->family >= CHIP_RV515)
                                h |= track->textures[u].height_11;
+                       h = h / (1 << i);
                        if (track->textures[u].roundup_h)
                                h = roundup_pow_of_two(h);
                        size += w * h;
index 609719490ec28c26b064df9d43eb642954fbb442,3e5703f324bd77761d9b634c54644f40cee06ac9..00cd0500ca7f73e9a6cdb430c84688b1341be617
@@@ -339,11 -339,10 +339,10 @@@ int r600_mc_init(struct radeon_device *
  {
        fixed20_12 a;
        u32 tmp;
-       int chansize;
+       int chansize, numchan;
        int r;
  
        /* Get VRAM informations */
-       rdev->mc.vram_width = 128;
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32(RAMCFG);
        if (tmp & CHANSIZE_OVERRIDE) {
        } else {
                chansize = 32;
        }
-       if (rdev->family == CHIP_R600) {
-               rdev->mc.vram_width = 8 * chansize;
-       } else if (rdev->family == CHIP_RV670) {
-               rdev->mc.vram_width = 4 * chansize;
-       } else if ((rdev->family == CHIP_RV610) ||
-                       (rdev->family == CHIP_RV620)) {
-               rdev->mc.vram_width = chansize;
-       } else if ((rdev->family == CHIP_RV630) ||
-                       (rdev->family == CHIP_RV635)) {
-               rdev->mc.vram_width = 2 * chansize;
+       tmp = RREG32(CHMAP);
+       switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+       case 0:
+       default:
+               numchan = 1;
+               break;
+       case 1:
+               numchan = 2;
+               break;
+       case 2:
+               numchan = 4;
+               break;
+       case 3:
+               numchan = 8;
+               break;
        }
+       rdev->mc.vram_width = numchan * chansize;
        /* Could aper size report 0 ? */
        rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
        rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
                        rdev->mc.gtt_location = rdev->mc.mc_vram_size;
                }
        } else {
-               if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
-                       rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
-                                                               0xFFFF) << 24;
-                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-                       tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
-                       if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
-                               /* Enough place after vram */
-                               rdev->mc.gtt_location = tmp;
-                       } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
-                               /* Enough place before vram */
+               rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+               rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
+                                                       0xFFFF) << 24;
+               tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
+               if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
+                       /* Enough place after vram */
+                       rdev->mc.gtt_location = tmp;
+               } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
+                       /* Enough place before vram */
+                       rdev->mc.gtt_location = 0;
+               } else {
+                       /* Not enough place after or before shrink
+                        * gart size
+                        */
+                       if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
                                rdev->mc.gtt_location = 0;
+                               rdev->mc.gtt_size = rdev->mc.vram_location;
                        } else {
-                               /* Not enough place after or before shrink
-                                * gart size
-                                */
-                               if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
-                                       rdev->mc.gtt_location = 0;
-                                       rdev->mc.gtt_size = rdev->mc.vram_location;
-                               } else {
-                                       rdev->mc.gtt_location = tmp;
-                                       rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
-                               }
+                               rdev->mc.gtt_location = tmp;
+                               rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
                        }
-                       rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-               } else {
-                       rdev->mc.vram_location = 0x00000000UL;
-                       rdev->mc.gtt_location = rdev->mc.mc_vram_size;
-                       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
                }
+               rdev->mc.gtt_location = rdev->mc.mc_vram_size;
        }
        rdev->mc.vram_start = rdev->mc.vram_location;
        rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
@@@ -1267,19 -1266,17 +1266,17 @@@ int r600_cp_resume(struct radeon_devic
  
        /* Set ring buffer size */
        rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+       tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
  #ifdef __BIG_ENDIAN
-       WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
-               (drm_order(4096/8) << 8) | rb_bufsz);
- #else
-       WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
+       tmp |= BUF_SWAP_32BIT;
  #endif
+       WREG32(CP_RB_CNTL, tmp);
        WREG32(CP_SEM_WAIT_TIMER, 0x4);
  
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
  
        /* Initialize the ring buffer's read and write pointers */
-       tmp = RREG32(CP_RB_CNTL);
        WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
        WREG32(CP_RB_RPTR_WR, 0);
        WREG32(CP_RB_WPTR, 0);
@@@ -1400,7 -1397,7 +1397,7 @@@ int r600_wb_enable(struct radeon_devic
        int r;
  
        if (rdev->wb.wb_obj == NULL) {
-               r = radeon_object_create(rdev, NULL, 4096, true,
+               r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
                                RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
                if (r) {
                        dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
@@@ -1450,8 -1447,8 +1447,8 @@@ int r600_copy_blit(struct radeon_devic
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_pages, struct radeon_fence *fence)
  {
-       r600_blit_prepare_copy(rdev, num_pages * 4096);
-       r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096);
+       r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
+       r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
        r600_blit_done_copy(rdev, fence);
        return 0;
  }
@@@ -1534,20 -1531,6 +1531,20 @@@ int r600_startup(struct radeon_device *
        return 0;
  }
  
 +void r600_vga_set_state(struct radeon_device *rdev, bool state)
 +{
 +      uint32_t temp;
 +
 +      temp = RREG32(CONFIG_CNTL);
 +      if (state == false) {
 +              temp &= ~(1<<0);
 +              temp |= (1<<1);
 +      } else {
 +              temp &= ~(1<<1);
 +      }
 +      WREG32(CONFIG_CNTL, temp);
 +}
 +
  int r600_resume(struct radeon_device *rdev)
  {
        int r;
index 9b64d47f1f82ff4116f0c8b25872730843f92c0a,cf238bf5700ff2c72daeade0ea9139e2c6c80371..00d9642198a3bba6a1c4dbc03da11777e828183e
@@@ -78,7 -78,6 +78,7 @@@
  #define CB_COLOR0_MASK                                  0x28100
  
  #define       CONFIG_MEMSIZE                                  0x5428
 +#define CONFIG_CNTL                                   0x5424
  #define       CP_STAT                                         0x8680
  #define       CP_COHER_BASE                                   0x85F8
  #define       CP_DEBUG                                        0xC1FC
  #define       PCIE_PORT_INDEX                                 0x0038
  #define       PCIE_PORT_DATA                                  0x003C
  
+ #define CHMAP                                         0x2004
+ #define               NOOFCHAN_SHIFT                                  12
+ #define               NOOFCHAN_MASK                                   0x00003000
  #define RAMCFG                                                0x2408
  #define               NOOFBANK_SHIFT                                  0
  #define               NOOFBANK_MASK                                   0x00000001
index 5ab35b81c86bfdeedf71ec6ff2b03e114bcfb4ec,ea3efd7ae85bb857c1154e11771897c8e359dcea..620a7c8ca01642614534ea9d5434007ab9a634a6
@@@ -276,6 -276,8 +276,8 @@@ union radeon_gart_table 
        struct radeon_gart_table_vram   vram;
  };
  
+ #define RADEON_GPU_PAGE_SIZE 4096
  struct radeon_gart {
        dma_addr_t                      table_addr;
        unsigned                        num_gpu_pages;
@@@ -590,7 -592,6 +592,7 @@@ struct radeon_asic 
        void (*fini)(struct radeon_device *rdev);
        int (*resume)(struct radeon_device *rdev);
        int (*suspend)(struct radeon_device *rdev);
 +      void (*vga_set_state)(struct radeon_device *rdev, bool state);
        int (*gpu_reset)(struct radeon_device *rdev);
        void (*gart_tlb_flush)(struct radeon_device *rdev);
        int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
@@@ -783,6 -784,7 +785,7 @@@ struct radeon_device 
        const struct firmware *me_fw;   /* all family ME firmware */
        const struct firmware *pfp_fw;  /* r6/700 PFP firmware */
        struct r600_blit r600_blit;
+       int msi_enabled; /* msi enabled */
  };
  
  int radeon_device_init(struct radeon_device *rdev,
@@@ -937,7 -939,6 +940,7 @@@ static inline void radeon_ring_write(st
  #define radeon_resume(rdev) (rdev)->asic->resume((rdev))
  #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
  #define radeon_cs_parse(p) rdev->asic->cs_parse((p))
 +#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
  #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
  #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
  #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
index df988142e6b0f8fc01d981859fc07dd71d2f27d2,88c19070247f5ebbc56fbfa9482bea0631e37bdf..e3f9edfa40fe8a5d63b64eee70e91345c4a1bd7b
@@@ -29,7 -29,6 +29,7 @@@
  #include <drm/drmP.h>
  #include <drm/drm_crtc_helper.h>
  #include <drm/radeon_drm.h>
 +#include <linux/vgaarb.h>
  #include "radeon_reg.h"
  #include "radeon.h"
  #include "radeon_asic.h"
@@@ -444,20 -443,24 +444,24 @@@ static uint32_t cail_reg_read(struct ca
        return r;
  }
  
- static struct card_info atom_card_info = {
-       .dev = NULL,
-       .reg_read = cail_reg_read,
-       .reg_write = cail_reg_write,
-       .mc_read = cail_mc_read,
-       .mc_write = cail_mc_write,
-       .pll_read = cail_pll_read,
-       .pll_write = cail_pll_write,
- };
  int radeon_atombios_init(struct radeon_device *rdev)
  {
-       atom_card_info.dev = rdev->ddev;
-       rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
+       struct card_info *atom_card_info =
+           kzalloc(sizeof(struct card_info), GFP_KERNEL);
+       if (!atom_card_info)
+               return -ENOMEM;
+       rdev->mode_info.atom_card_info = atom_card_info;
+       atom_card_info->dev = rdev->ddev;
+       atom_card_info->reg_read = cail_reg_read;
+       atom_card_info->reg_write = cail_reg_write;
+       atom_card_info->mc_read = cail_mc_read;
+       atom_card_info->mc_write = cail_mc_write;
+       atom_card_info->pll_read = cail_pll_read;
+       atom_card_info->pll_write = cail_pll_write;
+       rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
        radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
        return 0;
  }
  void radeon_atombios_fini(struct radeon_device *rdev)
  {
        kfree(rdev->mode_info.atom_context);
+       kfree(rdev->mode_info.atom_card_info);
  }
  
  int radeon_combios_init(struct radeon_device *rdev)
@@@ -477,18 -481,6 +482,18 @@@ void radeon_combios_fini(struct radeon_
  {
  }
  
 +/* if we get transitioned to only one device, take VGA back */
 +static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 +{
 +      struct radeon_device *rdev = cookie;
 +      radeon_vga_set_state(rdev, state);
 +      if (state)
 +              return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
 +                     VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 +      else
 +              return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 +}
 +
  void radeon_agp_disable(struct radeon_device *rdev)
  {
        rdev->flags &= ~RADEON_IS_AGP;
@@@ -581,15 -573,9 +586,15 @@@ int radeon_device_init(struct radeon_de
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
  
 +      /* if we have > 1 VGA cards, then disable the radeon VGA resources */
 +      /* this will fail for cards that aren't VGA class devices, just
 +       * ignore it */
 +      vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 +
        r = radeon_init(rdev);
        if (r)
                return r;
 +
        if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
                /* Acceleration not working on AGP card try again
                 * with fallback to PCI or PCIE GART
@@@ -614,8 -600,8 +619,8 @@@ void radeon_device_fini(struct radeon_d
  {
        DRM_INFO("radeon: finishing device.\n");
        rdev->shutdown = true;
 -      /* Order matter so becarefull if you rearrange anythings */
        radeon_fini(rdev);
 +      vga_client_register(rdev->pdev, NULL, NULL, NULL);
        iounmap(rdev->rmmio);
        rdev->rmmio = NULL;
  }
index 765bd184b6fc15382fc10b2dfaadd91414709054,f489c0de6f1330ed3596f1d06d8a99461f1f483e..1381e06d6af3ff317521aba0952ab5b2e1a397a9
@@@ -295,6 -295,12 +295,12 @@@ static int radeon_move_vram_ram(struct 
        if (unlikely(r)) {
                return r;
        }
+       r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
+       if (unlikely(r)) {
+               goto out_cleanup;
+       }
        r = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
@@@ -530,7 -536,7 +536,7 @@@ void radeon_ttm_fini(struct radeon_devi
  }
  
  static struct vm_operations_struct radeon_ttm_vm_ops;
 -static struct vm_operations_struct *ttm_vm_ops = NULL;
 +static const struct vm_operations_struct *ttm_vm_ops = NULL;
  
  static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  {
diff --combined include/drm/drmP.h
index c8e64bbadbcf3c99ccc4eba20a3e13610779a9a9,fe52254df60c66309040a1d5337ac3fbefce180d..b0b36838ab11a26b155d983b8b64726aa3362905
@@@ -426,6 -426,14 +426,14 @@@ struct drm_buf_entry 
        struct drm_freelist freelist;
  };
  
+ /* Event queued up for userspace to read */
+ struct drm_pending_event {
+       struct drm_event *event;
+       struct list_head link;
+       struct drm_file *file_priv;
+       void (*destroy)(struct drm_pending_event *event);
+ };
  /** File private data */
  struct drm_file {
        int authenticated;
        struct drm_master *master; /* master this node is currently associated with
                                      N.B. not always minor->master */
        struct list_head fbs;
+       wait_queue_head_t event_wait;
+       struct list_head event_list;
+       int event_space;
  };
  
  /** Wait queue */
@@@ -810,9 -822,6 +822,9 @@@ struct drm_driver 
        int (*gem_init_object) (struct drm_gem_object *obj);
        void (*gem_free_object) (struct drm_gem_object *obj);
  
 +      /* vga arb irq handler */
 +      void (*vgaarb_irq)(struct drm_device *dev, bool state);
 +
        /* Driver private ops for this object */
        struct vm_operations_struct *gem_vm_ops;
  
@@@ -900,6 -909,12 +912,12 @@@ struct drm_minor 
        struct drm_mode_group mode_group;
  };
  
+ struct drm_pending_vblank_event {
+       struct drm_pending_event base;
+       int pipe;
+       struct drm_event_vblank event;
+ };
  /**
   * DRM device structure. This structure represent a complete card that
   * may contain multiple heads.
@@@ -999,6 -1014,12 +1017,12 @@@ struct drm_device 
  
        u32 max_vblank_count;           /**< size of vblank counter register */
  
+       /**
+        * List of events
+        */
+       struct list_head vblank_event_list;
+       spinlock_t event_lock;
        /*@} */
        cycles_t ctx_start;
        cycles_t lck_start;
@@@ -1135,6 -1156,8 +1159,8 @@@ extern int drm_lastclose(struct drm_dev
  extern int drm_open(struct inode *inode, struct file *filp);
  extern int drm_stub_open(struct inode *inode, struct file *filp);
  extern int drm_fasync(int fd, struct file *filp, int on);
+ extern ssize_t drm_read(struct file *filp, char __user *buffer,
+                       size_t count, loff_t *offset);
  extern int drm_release(struct inode *inode, struct file *filp);
  
                                /* Mapping support (drm_vm.h) */
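
The vgaarb_irq hook added to struct drm_driver above lets a non-KMS driver replace the generic behaviour of drm_irq_vgaarb_nokms() (a full irq_uninstall/irq_preinstall/irq_postinstall cycle) with something lighter when the VGA arbiter toggles its client state. A minimal sketch of how a driver might wire this up; the foo_* names are placeholders, not an existing driver:

/* Sketch only: a UMS driver supplying its own VGA-arbiter IRQ callback.
 * foo_mask_irqs()/foo_unmask_irqs() stand in for whatever the driver uses
 * to quiesce and re-arm its interrupt sources. */
static void foo_vgaarb_irq(struct drm_device *dev, bool state)
{
	if (!dev->irq_enabled)
		return;

	if (state)	/* same sense as the uninstall branch of the generic handler */
		foo_mask_irqs(dev);
	else		/* generic handler would re-run pre/postinstall here */
		foo_unmask_irqs(dev);
}

static struct drm_driver foo_driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
	/* ... file_operations, IRQ handlers, etc. ... */
	.vgaarb_irq	= foo_vgaarb_irq,
};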