]> asedeno.scripts.mit.edu Git - linux.git/commitdiff
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 8 Aug 2014 00:36:12 +0000 (17:36 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 8 Aug 2014 00:36:12 +0000 (17:36 -0700)
Pull DRM updates from Dave Airlie:
 "Like all good pull reqs this ends with a revert, so it must mean we
  tested it,

[ Ed. That's _one_ way of looking at it ]

  This pull is missing nouveau, Ben has been stuck trying to track down
  a very longstanding bug that revealed itself due to some other
  changes.  I've asked him to send you a direct pull request for nouveau
  once he cleans things up.  I'm away until Monday so don't want to
  delay things, you can make a decision on that when he sends it, I have
  my phone so I can ack things just not really merge much.

  It has one trivial conflict with your tree in armada_drv.c, and also
  the pull request contains some component changes that are already in
  your tree, the base tree from Russell went via Greg's tree already,
  but some stuff still shows up in here that doesn't when I merge my
  tree into yours.

  Otherwise all pretty standard graphics fare, one new driver and
  changes all over the place.

  New drivers:
   - sti kms driver for STMicroelectronics chipsets stih416 and stih407.

  core:
   - lots of cleanups to the drm core
   - DP MST helper code merged
   - universal cursor planes.
   - render nodes enabled by default

  panel:
   - better panel interfaces
   - new panel support
   - non-continuous clock advertising ability

  ttm:
   - shrinker fixes

  i915:
   - hopefully ditched UMS support
   - runtime pm fixes
   - psr tracking and locking - now enabled by default
   - userptr fixes
   - backlight brightness fixes
   - MST support merged
   - runtime PM for dpms
   - primary planes locking fixes
   - gen8 hw semaphore support
   - fbc fixes
   - runtime PM on SOix sleep state hw.
   - mmio base page flipping
   - lots of vlv/chv fixes.
   - universal cursor planes

  radeon:
   - Hawaii fixes
   - display scalar support for non-fixed mode displays
   - new firmware format support
   - dpm on more asics by default
   - GPUVM improvements
   - uncached and wc GTT buffers
   - BOs > visible VRAM

  exynos:
   - i80 interface support
   - module auto-loading
   - ipp driver consolidated.

  armada:
   - irq handling in crtc layer only
   - crtc renumbering
   - add component support
   - DT interaction changes.

  tegra:
   - load as module fixes
   - eDP bpp and sync polarity fixed
   - DSI non-continuous clock mode support
   - better support for importing buffers from nouveau

  msm:
   - mdp5/adq8084 v1.3 hw enablement
   - devicetree clk changes
   - ifc6410 board working

  tda998x:
   - component support
   - DT documentation update

  vmwgfx:
   - fix compat shader namespace"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (551 commits)
  Revert "drm: drop redundant drm_file->is_master"
  drm/panel: simple: Use devm_gpiod_get_optional()
  drm/dsi: Replace upcasting macro by function
  drm/panel: ld9040: Replace upcasting macro by function
  drm/exynos: dp: Modify driver to support drm_panel
  drm/exynos: Move DP setup into commit()
  drm/panel: simple: Add AUO B133HTN01 panel support
  drm/panel: simple: Support delays in panel functions
  drm/panel: simple: Add proper definition for prepare and unprepare
  drm/panel: s6e8aa0: Add proper definition for prepare and unprepare
  drm/panel: ld9040: Add proper definition for prepare and unprepare
  drm/tegra: Add support for panel prepare and unprepare routines
  drm/exynos: dsi: Add support for panel prepare and unprepare routines
  drm/exynos: dpi: Add support for panel prepare and unprepare routines
  drm/panel: simple: Add dummy prepare and unprepare routines
  drm/panel: s6e8aa0: Add dummy prepare and unprepare routines
  drm/panel: ld9040: Add dummy prepare and unprepare routines
  drm/panel: Provide convenience wrapper for .get_modes()
  drm/panel: add .prepare() and .unprepare() functions
  drm/panel: simple: Remove simple-panel compatible
  ...

13 files changed:
1  2 
arch/arm/boot/dts/exynos4.dtsi
arch/arm/boot/dts/exynos5.dtsi
arch/arm/boot/dts/exynos5420.dtsi
drivers/gpu/drm/armada/armada_crtc.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_prime.c
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/staging/imx-drm/imx-drm-core.c
include/drm/drmP.h

index 93bcc1fe8a4e7f61623cbdda24adf552079aa5b6,d9cb972d99146533ba63ce205e39db2287297197..bd3b9b537976fceb383b83c5453fe9e0560530d0
                fimc1 = &fimc_1;
                fimc2 = &fimc_2;
                fimc3 = &fimc_3;
 +              serial0 = &serial_0;
 +              serial1 = &serial_1;
 +              serial2 = &serial_2;
 +              serial3 = &serial_3;
        };
  
        clock_audss: clock-controller@03810000 {
                status = "disabled";
        };
  
 -      serial@13800000 {
 +      serial_0: serial@13800000 {
                compatible = "samsung,exynos4210-uart";
                reg = <0x13800000 0x100>;
                interrupts = <0 52 0>;
                status = "disabled";
        };
  
 -      serial@13810000 {
 +      serial_1: serial@13810000 {
                compatible = "samsung,exynos4210-uart";
                reg = <0x13810000 0x100>;
                interrupts = <0 53 0>;
                status = "disabled";
        };
  
 -      serial@13820000 {
 +      serial_2: serial@13820000 {
                compatible = "samsung,exynos4210-uart";
                reg = <0x13820000 0x100>;
                interrupts = <0 54 0>;
                status = "disabled";
        };
  
 -      serial@13830000 {
 +      serial_3: serial@13830000 {
                compatible = "samsung,exynos4210-uart";
                reg = <0x13830000 0x100>;
                interrupts = <0 55 0>;
                clocks = <&clock CLK_SCLK_FIMD0>, <&clock CLK_FIMD0>;
                clock-names = "sclk_fimd", "fimd";
                samsung,power-domain = <&pd_lcd0>;
+               samsung,sysreg = <&sys_reg>;
                status = "disabled";
        };
  };
index ff2d2cb0f79e083f94649ac3e91025f1c5ceb89b,fdead12952a163e765286802b9d5e282f3668346..a0cc0b6f8f96d52c24729dd1432327e4935f389b
  / {
        interrupt-parent = <&gic>;
  
 +      aliases {
 +              serial0 = &serial_0;
 +              serial1 = &serial_1;
 +              serial2 = &serial_2;
 +              serial3 = &serial_3;
 +      };
 +
        chipid@10000000 {
                compatible = "samsung,exynos4210-chipid";
                reg = <0x10000000 0x100>;
                interrupts = <1 9 0xf04>;
        };
  
 -      serial@12C00000 {
 +      serial_0: serial@12C00000 {
                compatible = "samsung,exynos4210-uart";
                reg = <0x12C00000 0x100>;
                interrupts = <0 51 0>;
        };
  
 -      serial@12C10000 {
 +      serial_1: serial@12C10000 {
                compatible = "samsung,exynos4210-uart";
                reg = <0x12C10000 0x100>;
                interrupts = <0 52 0>;
        };
  
 -      serial@12C20000 {
 +      serial_2: serial@12C20000 {
                compatible = "samsung,exynos4210-uart";
                reg = <0x12C20000 0x100>;
                interrupts = <0 53 0>;
        };
  
 -      serial@12C30000 {
 +      serial_3: serial@12C30000 {
                compatible = "samsung,exynos4210-uart";
                reg = <0x12C30000 0x100>;
                interrupts = <0 54 0>;
@@@ -94,6 -87,7 +94,7 @@@
                reg = <0x14400000 0x40000>;
                interrupt-names = "fifo", "vsync", "lcd_sys";
                interrupts = <18 4>, <18 5>, <18 6>;
+               samsung,sysreg = <&sysreg_system_controller>;
                status = "disabled";
        };
  
index a40a5c2b5a4ff1a776a74e3fe24be0284140b8b9,43004665a48cfe41cf271e47268009c51d0b8aab..08dd681c0019f6c118a5e1fd9516270e96516fcb
                phy-names = "dp";
        };
  
+       mipi_phy: video-phy@10040714 {
+               compatible = "samsung,s5pv210-mipi-video-phy";
+               reg = <0x10040714 12>;
+               #phy-cells = <1>;
+       };
+       dsi@14500000 {
+               compatible = "samsung,exynos5410-mipi-dsi";
+               reg = <0x14500000 0x10000>;
+               interrupts = <0 82 0>;
+               samsung,power-domain = <&disp_pd>;
+               phys = <&mipi_phy 1>;
+               phy-names = "dsim";
+               clocks = <&clock CLK_DSIM1>, <&clock CLK_SCLK_MIPI1>;
+               clock-names = "bus_clk", "pll_clk";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disabled";
+       };
        fimd: fimd@14400000 {
                samsung,power-domain = <&disp_pd>;
                clocks = <&clock CLK_SCLK_FIMD1>, <&clock CLK_FIMD1>;
        pmu_system_controller: system-controller@10040000 {
                compatible = "samsung,exynos5420-pmu", "syscon";
                reg = <0x10040000 0x5000>;
 +              clock-names = "clkout16";
 +              clocks = <&clock CLK_FIN_PLL>;
 +              #clock-cells = <1>;
        };
  
        sysreg_system_controller: syscon@10050000 {
index 3aedf9e993e65d8d3b8d67c8241d2e7a34b22990,3f620e21e06bb1cd9f10553b400ba27e07d82acb..9a0cc09e665308bd6d6a7c7b518b455f5acd050d
@@@ -7,6 -7,9 +7,9 @@@
   * published by the Free Software Foundation.
   */
  #include <linux/clk.h>
+ #include <linux/component.h>
+ #include <linux/of_device.h>
+ #include <linux/platform_device.h>
  #include <drm/drmP.h>
  #include <drm/drm_crtc_helper.h>
  #include "armada_crtc.h"
@@@ -332,24 -335,23 +335,23 @@@ static void armada_drm_crtc_commit(stru
  static bool armada_drm_crtc_mode_fixup(struct drm_crtc *crtc,
        const struct drm_display_mode *mode, struct drm_display_mode *adj)
  {
-       struct armada_private *priv = crtc->dev->dev_private;
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        int ret;
  
        /* We can't do interlaced modes if we don't have the SPU_ADV_REG */
-       if (!priv->variant->has_spu_adv_reg &&
+       if (!dcrtc->variant->has_spu_adv_reg &&
            adj->flags & DRM_MODE_FLAG_INTERLACE)
                return false;
  
        /* Check whether the display mode is possible */
-       ret = priv->variant->crtc_compute_clock(dcrtc, adj, NULL);
+       ret = dcrtc->variant->compute_clock(dcrtc, adj, NULL);
        if (ret)
                return false;
  
        return true;
  }
  
- void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
  {
        struct armada_vbl_event *e, *n;
        void __iomem *base = dcrtc->base;
        }
  }
  
+ static irqreturn_t armada_drm_irq(int irq, void *arg)
+ {
+       struct armada_crtc *dcrtc = arg;
+       u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+       /*
+        * This is rediculous - rather than writing bits to clear, we
+        * have to set the actual status register value.  This is racy.
+        */
+       writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+       /* Mask out those interrupts we haven't enabled */
+       v = stat & dcrtc->irq_ena;
+       if (v & (VSYNC_IRQ|GRA_FRAME_IRQ|DUMB_FRAMEDONE)) {
+               armada_drm_crtc_irq(dcrtc, stat);
+               return IRQ_HANDLED;
+       }
+       return IRQ_NONE;
+ }
  /* These are locked by dev->vbl_lock */
  void armada_drm_crtc_disable_irq(struct armada_crtc *dcrtc, u32 mask)
  {
@@@ -470,7 -493,6 +493,6 @@@ static int armada_drm_crtc_mode_set(str
        struct drm_display_mode *mode, struct drm_display_mode *adj,
        int x, int y, struct drm_framebuffer *old_fb)
  {
-       struct armada_private *priv = crtc->dev->dev_private;
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
        struct armada_regs regs[17];
        uint32_t lm, rm, tm, bm, val, sclk;
        }
  
        /* Now compute the divider for real */
-       priv->variant->crtc_compute_clock(dcrtc, adj, &sclk);
+       dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
  
        /* Ensure graphic fifo is enabled */
        armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
        dcrtc->v[1].spu_v_porch = tm << 16 | bm;
        val = adj->crtc_hsync_start;
        dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
-               priv->variant->spu_adv_reg;
+               dcrtc->variant->spu_adv_reg;
  
        if (interlaced) {
                /* Odd interlaced frame */
                dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
                val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
                dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
-                       priv->variant->spu_adv_reg;
+                       dcrtc->variant->spu_adv_reg;
        } else {
                dcrtc->v[0] = dcrtc->v[1];
        }
        armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
                           LCD_SPUT_V_H_TOTAL);
  
-       if (priv->variant->has_spu_adv_reg) {
+       if (dcrtc->variant->has_spu_adv_reg) {
                armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
                                     ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
                                     ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
@@@ -805,12 -827,11 +827,11 @@@ static int armada_drm_crtc_cursor_set(s
  {
        struct drm_device *dev = crtc->dev;
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-       struct armada_private *priv = crtc->dev->dev_private;
        struct armada_gem_object *obj = NULL;
        int ret;
  
        /* If no cursor support, replicate drm's return value */
-       if (!priv->variant->has_spu_adv_reg)
+       if (!dcrtc->variant->has_spu_adv_reg)
                return -ENXIO;
  
        if (handle && w > 0 && h > 0) {
@@@ -858,11 -879,10 +879,10 @@@ static int armada_drm_crtc_cursor_move(
  {
        struct drm_device *dev = crtc->dev;
        struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-       struct armada_private *priv = crtc->dev->dev_private;
        int ret;
  
        /* If no cursor support, replicate drm's return value */
-       if (!priv->variant->has_spu_adv_reg)
+       if (!dcrtc->variant->has_spu_adv_reg)
                return -EFAULT;
  
        mutex_lock(&dev->struct_mutex);
@@@ -888,6 -908,10 +908,10 @@@ static void armada_drm_crtc_destroy(str
        if (!IS_ERR(dcrtc->clk))
                clk_disable_unprepare(dcrtc->clk);
  
+       writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
+       of_node_put(dcrtc->crtc.port);
        kfree(dcrtc);
  }
  
@@@ -1027,21 -1051,24 +1051,22 @@@ static int armada_drm_crtc_create_prope
        return 0;
  }
  
- int armada_drm_crtc_create(struct drm_device *dev, unsigned num,
-       struct resource *res)
+ int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
+       struct resource *res, int irq, const struct armada_variant *variant,
+       struct device_node *port)
  {
-       struct armada_private *priv = dev->dev_private;
+       struct armada_private *priv = drm->dev_private;
        struct armada_crtc *dcrtc;
        void __iomem *base;
        int ret;
  
-       ret = armada_drm_crtc_create_properties(dev);
+       ret = armada_drm_crtc_create_properties(drm);
        if (ret)
                return ret;
  
-       base = devm_ioremap_resource(dev->dev, res);
 -      base = devm_request_and_ioremap(dev, res);
 -      if (!base) {
 -              DRM_ERROR("failed to ioremap register\n");
 -              return -ENOMEM;
 -      }
++      base = devm_ioremap_resource(dev, res);
 +      if (IS_ERR(base))
 +              return PTR_ERR(base);
  
        dcrtc = kzalloc(sizeof(*dcrtc), GFP_KERNEL);
        if (!dcrtc) {
                return -ENOMEM;
        }
  
+       if (dev != drm->dev)
+               dev_set_drvdata(dev, dcrtc);
+       dcrtc->variant = variant;
        dcrtc->base = base;
-       dcrtc->num = num;
+       dcrtc->num = drm->mode_config.num_crtc;
        dcrtc->clk = ERR_PTR(-EINVAL);
        dcrtc->csc_yuv_mode = CSC_AUTO;
        dcrtc->csc_rgb_mode = CSC_AUTO;
                       CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
        writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
        writel_relaxed(0x00000000, dcrtc->base + LCD_SPU_GRA_OVSA_HPXL_VLN);
+       writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+       writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+       ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
+                              dcrtc);
+       if (ret < 0) {
+               kfree(dcrtc);
+               return ret;
+       }
  
-       if (priv->variant->crtc_init) {
-               ret = priv->variant->crtc_init(dcrtc);
+       if (dcrtc->variant->init) {
+               ret = dcrtc->variant->init(dcrtc, dev);
                if (ret) {
                        kfree(dcrtc);
                        return ret;
  
        priv->dcrtc[dcrtc->num] = dcrtc;
  
-       drm_crtc_init(dev, &dcrtc->crtc, &armada_crtc_funcs);
+       dcrtc->crtc.port = port;
+       drm_crtc_init(drm, &dcrtc->crtc, &armada_crtc_funcs);
        drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
  
        drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
        drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
                                   dcrtc->csc_rgb_mode);
  
-       return armada_overlay_plane_create(dev, 1 << dcrtc->num);
+       return armada_overlay_plane_create(drm, 1 << dcrtc->num);
+ }
+ static int
+ armada_lcd_bind(struct device *dev, struct device *master, void *data)
+ {
+       struct platform_device *pdev = to_platform_device(dev);
+       struct drm_device *drm = data;
+       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       int irq = platform_get_irq(pdev, 0);
+       const struct armada_variant *variant;
+       struct device_node *port = NULL;
+       if (irq < 0)
+               return irq;
+       if (!dev->of_node) {
+               const struct platform_device_id *id;
+               id = platform_get_device_id(pdev);
+               if (!id)
+                       return -ENXIO;
+               variant = (const struct armada_variant *)id->driver_data;
+       } else {
+               const struct of_device_id *match;
+               struct device_node *np, *parent = dev->of_node;
+               match = of_match_device(dev->driver->of_match_table, dev);
+               if (!match)
+                       return -ENXIO;
+               np = of_get_child_by_name(parent, "ports");
+               if (np)
+                       parent = np;
+               port = of_get_child_by_name(parent, "port");
+               of_node_put(np);
+               if (!port) {
+                       dev_err(dev, "no port node found in %s\n",
+                               parent->full_name);
+                       return -ENXIO;
+               }
+               variant = match->data;
+       }
+       return armada_drm_crtc_create(drm, dev, res, irq, variant, port);
+ }
+ static void
+ armada_lcd_unbind(struct device *dev, struct device *master, void *data)
+ {
+       struct armada_crtc *dcrtc = dev_get_drvdata(dev);
+       armada_drm_crtc_destroy(&dcrtc->crtc);
  }
+ static const struct component_ops armada_lcd_ops = {
+       .bind = armada_lcd_bind,
+       .unbind = armada_lcd_unbind,
+ };
+ static int armada_lcd_probe(struct platform_device *pdev)
+ {
+       return component_add(&pdev->dev, &armada_lcd_ops);
+ }
+ static int armada_lcd_remove(struct platform_device *pdev)
+ {
+       component_del(&pdev->dev, &armada_lcd_ops);
+       return 0;
+ }
+ static struct of_device_id armada_lcd_of_match[] = {
+       {
+               .compatible     = "marvell,dove-lcd",
+               .data           = &armada510_ops,
+       },
+       {}
+ };
+ MODULE_DEVICE_TABLE(of, armada_lcd_of_match);
+ static const struct platform_device_id armada_lcd_platform_ids[] = {
+       {
+               .name           = "armada-lcd",
+               .driver_data    = (unsigned long)&armada510_ops,
+       }, {
+               .name           = "armada-510-lcd",
+               .driver_data    = (unsigned long)&armada510_ops,
+       },
+       { },
+ };
+ MODULE_DEVICE_TABLE(platform, armada_lcd_platform_ids);
+ struct platform_driver armada_lcd_platform_driver = {
+       .probe  = armada_lcd_probe,
+       .remove = armada_lcd_remove,
+       .driver = {
+               .name   = "armada-lcd",
+               .owner  =  THIS_MODULE,
+               .of_match_table = armada_lcd_of_match,
+       },
+       .id_table = armada_lcd_platform_ids,
+ };
index 1f7700897dfc1e1c98908d2b39bc0adefc5bd36d,5de27f9b8c2646a974a2f22a3dd62fda3417f34a..a26eec285da7c31fa1ed2c746fc060a0c0df1331
@@@ -53,7 -53,7 +53,7 @@@
  
  #define DRIVER_NAME           "i915"
  #define DRIVER_DESC           "Intel Graphics"
- #define DRIVER_DATE           "20080730"
+ #define DRIVER_DATE           "20140620"
  
  enum pipe {
        INVALID_PIPE = -1,
@@@ -129,6 -129,7 +129,7 @@@ enum intel_display_power_domain 
        POWER_DOMAIN_PORT_OTHER,
        POWER_DOMAIN_VGA,
        POWER_DOMAIN_AUDIO,
+       POWER_DOMAIN_PLLS,
        POWER_DOMAIN_INIT,
  
        POWER_DOMAIN_NUM,
@@@ -178,14 -179,20 +179,20 @@@ enum hpd_pin 
        list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
                if ((intel_connector)->base.encoder == (__encoder))
  
+ #define for_each_power_domain(domain, mask)                           \
+       for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)     \
+               if ((1 << (domain)) & (mask))
  struct drm_i915_private;
  struct i915_mmu_object;
  
  enum intel_dpll_id {
        DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
        /* real shared dpll ids must be >= 0 */
-       DPLL_ID_PCH_PLL_A,
-       DPLL_ID_PCH_PLL_B,
+       DPLL_ID_PCH_PLL_A = 0,
+       DPLL_ID_PCH_PLL_B = 1,
+       DPLL_ID_WRPLL1 = 0,
+       DPLL_ID_WRPLL2 = 1,
  };
  #define I915_NUM_PLLS 2
  
@@@ -194,6 -201,7 +201,7 @@@ struct intel_dpll_hw_state 
        uint32_t dpll_md;
        uint32_t fp0;
        uint32_t fp1;
+       uint32_t wrpll;
  };
  
  struct intel_shared_dpll {
        /* should match the index in the dev_priv->shared_dplls array */
        enum intel_dpll_id id;
        struct intel_dpll_hw_state hw_state;
+       /* The mode_set hook is optional and should be used together with the
+        * intel_prepare_shared_dpll function. */
        void (*mode_set)(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll);
        void (*enable)(struct drm_i915_private *dev_priv,
@@@ -228,12 -238,6 +238,6 @@@ void intel_link_compute_m_n(int bpp, in
                            int pixel_clock, int link_clock,
                            struct intel_link_m_n *m_n);
  
- struct intel_ddi_plls {
-       int spll_refcount;
-       int wrpll1_refcount;
-       int wrpll2_refcount;
- };
  /* Interface history:
   *
   * 1.1: Original.
@@@ -324,6 -328,7 +328,7 @@@ struct drm_i915_error_state 
        u64 fence[I915_MAX_NUM_FENCES];
        struct intel_overlay_error_state *overlay;
        struct intel_display_error_state *display;
+       struct drm_i915_error_object *semaphore_obj;
  
        struct drm_i915_error_ring {
                bool valid;
@@@ -435,8 -440,8 +440,8 @@@ struct drm_i915_display_funcs 
        void (*update_wm)(struct drm_crtc *crtc);
        void (*update_sprite_wm)(struct drm_plane *plane,
                                 struct drm_crtc *crtc,
-                                uint32_t sprite_width, int pixel_size,
-                                bool enable, bool scaled);
+                                uint32_t sprite_width, uint32_t sprite_height,
+                                int pixel_size, bool enable, bool scaled);
        void (*modeset_global_resources)(struct drm_device *dev);
        /* Returns the active state of the crtc, and if the crtc is active,
         * fills out the pipe-config with the hw state. */
@@@ -552,8 -557,6 +557,6 @@@ struct intel_device_info 
        /* Register offsets for the various display pipes and transcoders */
        int pipe_offsets[I915_MAX_TRANSCODERS];
        int trans_offsets[I915_MAX_TRANSCODERS];
-       int dpll_offsets[I915_MAX_PIPES];
-       int dpll_md_offsets[I915_MAX_PIPES];
        int palette_offsets[I915_MAX_PIPES];
        int cursor_offsets[I915_MAX_PIPES];
  };
@@@ -586,28 -589,48 +589,48 @@@ struct i915_ctx_hang_stats 
  };
  
  /* This must match up with the value previously used for execbuf2.rsvd1. */
- #define DEFAULT_CONTEXT_ID 0
+ #define DEFAULT_CONTEXT_HANDLE 0
+ /**
+  * struct intel_context - as the name implies, represents a context.
+  * @ref: reference count.
+  * @user_handle: userspace tracking identity for this context.
+  * @remap_slice: l3 row remapping information.
+  * @file_priv: filp associated with this context (NULL for global default
+  *           context).
+  * @hang_stats: information about the role of this context in possible GPU
+  *            hangs.
+  * @vm: virtual memory space used by this context.
+  * @legacy_hw_ctx: render context backing object and whether it is correctly
+  *                initialized (legacy ring submission mechanism only).
+  * @link: link in the global list of contexts.
+  *
+  * Contexts are memory images used by the hardware to store copies of their
+  * internal state.
+  */
  struct intel_context {
        struct kref ref;
-       int id;
-       bool is_initialized;
+       int user_handle;
        uint8_t remap_slice;
        struct drm_i915_file_private *file_priv;
-       struct intel_engine_cs *last_ring;
-       struct drm_i915_gem_object *obj;
        struct i915_ctx_hang_stats hang_stats;
        struct i915_address_space *vm;
  
+       struct {
+               struct drm_i915_gem_object *rcs_state;
+               bool initialized;
+       } legacy_hw_ctx;
        struct list_head link;
  };
  
  struct i915_fbc {
        unsigned long size;
+       unsigned threshold;
        unsigned int fb_id;
        enum plane plane;
        int y;
  
-       struct drm_mm_node *compressed_fb;
+       struct drm_mm_node compressed_fb;
        struct drm_mm_node *compressed_llb;
  
        struct intel_fbc_work {
@@@ -635,9 -658,15 +658,15 @@@ struct i915_drrs 
        struct intel_connector *connector;
  };
  
+ struct intel_dp;
  struct i915_psr {
+       struct mutex lock;
        bool sink_support;
        bool source_ok;
+       struct intel_dp *enabled;
+       bool active;
+       struct delayed_work work;
+       unsigned busy_frontbuffer_bits;
  };
  
  enum intel_pch {
@@@ -880,6 -909,12 +909,12 @@@ struct vlv_s0ix_state 
        u32 clock_gate_dis2;
  };
  
+ struct intel_rps_ei {
+       u32 cz_clock;
+       u32 render_c0;
+       u32 media_c0;
+ };
  struct intel_gen6_power_mgmt {
        /* work and pm_iir are protected by dev_priv->irq_lock */
        struct work_struct work;
        u8 efficient_freq;      /* AKA RPe. Pre-determined balanced frequency */
        u8 rp1_freq;            /* "less than" RP0 power/freqency */
        u8 rp0_freq;            /* Non-overclocked max frequency. */
+       u32 cz_freq;
+       u32 ei_interrupt_count;
  
        int last_adj;
        enum { LOW_POWER, BETWEEN, HIGH_POWER } power;
        bool enabled;
        struct delayed_work delayed_resume_work;
  
+       /* manual wa residency calculations */
+       struct intel_rps_ei up_ei, down_ei;
        /*
         * Protects RPS/RC6 register access and PCU communication.
         * Must be taken after struct_mutex if nested.
@@@ -931,7 -972,7 +972,7 @@@ struct intel_ilk_power_mgmt 
        unsigned long last_time1;
        unsigned long chipset_power;
        u64 last_count2;
 -      struct timespec last_time2;
 +      u64 last_time2;
        unsigned long gfx_power;
        u8 corr;
  
@@@ -1230,6 -1271,7 +1271,7 @@@ struct intel_vbt_data 
                u16 pwm_freq_hz;
                bool present;
                bool active_low_pwm;
+               u8 min_brightness;      /* min_brightness/255 of max */
        } backlight;
  
        /* MIPI DSI */
@@@ -1299,7 -1341,7 +1341,7 @@@ struct ilk_wm_values 
   */
  struct i915_runtime_pm {
        bool suspended;
-       bool irqs_disabled;
+       bool _irqs_disabled;
  };
  
  enum intel_pipe_crc_source {
@@@ -1332,6 -1374,17 +1374,17 @@@ struct intel_pipe_crc 
        wait_queue_head_t wq;
  };
  
+ struct i915_frontbuffer_tracking {
+       struct mutex lock;
+       /*
+        * Tracking bits for delayed frontbuffer flushing du to gpu activity or
+        * scheduled flips.
+        */
+       unsigned busy_bits;
+       unsigned flip_bits;
+ };
  struct drm_i915_private {
        struct drm_device *dev;
        struct kmem_cache *slab;
  
        struct pci_dev *bridge_dev;
        struct intel_engine_cs ring[I915_NUM_RINGS];
+       struct drm_i915_gem_object *semaphore_obj;
        uint32_t last_seqno, next_seqno;
  
        drm_dma_handle_t *status_page_dmah;
        /* protects the irq masks */
        spinlock_t irq_lock;
  
+       /* protects the mmio flip data */
+       spinlock_t mmio_flip_lock;
        bool display_irqs_enabled;
  
        /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
        u32 pipestat_irq_mask[I915_MAX_PIPES];
  
        struct work_struct hotplug_work;
-       bool enable_hotplug_processing;
        struct {
                unsigned long hpd_last_jiffies;
                int hpd_cnt;
  
        int num_shared_dpll;
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
-       struct intel_ddi_plls ddi_plls;
        int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
  
        /* Reclocking support */
        bool lvds_downclock_avail;
        /* indicates the reduced downclock for LVDS*/
        int lvds_downclock;
+       struct i915_frontbuffer_tracking fb_tracking;
        u16 orig_clock;
  
        bool mchbar_need_disable;
  
        struct i915_runtime_pm pm;
  
+       struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
+       u32 long_hpd_port_mask;
+       u32 short_hpd_port_mask;
+       struct work_struct dig_port_work;
+       /*
+        * if we get a HPD irq from DP and a HPD irq from non-DP
+        * the non-DP HPD could block the workqueue on a mode config
+        * mutex getting, that userspace may have taken. However
+        * userspace is waiting on the DP workqueue to run which is
+        * blocked behind the non-DP one.
+        */
+       struct workqueue_struct *dp_wq;
        /* Old dri1 support infrastructure, beware the dragons ya fools entering
         * here! */
        struct i915_dri1_state dri1;
@@@ -1592,6 -1664,28 +1664,28 @@@ struct drm_i915_gem_object_ops 
        void (*release)(struct drm_i915_gem_object *);
  };
  
+ /*
+  * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
+  * considered to be the frontbuffer for the given plane interface-wise. This
+  * doesn't mean that the hw necessarily already scans it out, but that any
+  * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
+  *
+  * We have one bit per pipe and per scanout plane type.
+  */
+ #define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
+ #define INTEL_FRONTBUFFER_BITS \
+       (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
+ #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
+       (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
+ #define INTEL_FRONTBUFFER_CURSOR(pipe) \
+       (1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+ #define INTEL_FRONTBUFFER_SPRITE(pipe) \
+       (1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+ #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
+       (1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
+ #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
+       (0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
  struct drm_i915_gem_object {
        struct drm_gem_object base;
  
        unsigned int pin_mappable:1;
        unsigned int pin_display:1;
  
+       /*
+        * Is the object to be mapped as read-only to the GPU
+        * Only honoured if hardware has relevant pte bit
+        */
+       unsigned long gt_ro:1;
        /*
         * Is the GPU currently using a fence to access this buffer,
         */
        unsigned int has_global_gtt_mapping:1;
        unsigned int has_dma_mapping:1;
  
+       unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
        struct sg_table *pages;
        int pages_pin_count;
  
  };
  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
  
+ void i915_gem_track_fb(struct drm_i915_gem_object *old,
+                      struct drm_i915_gem_object *new,
+                      unsigned frontbuffer_bits);
  /**
   * Request queue structure.
   *
@@@ -1940,10 -2046,8 +2046,8 @@@ struct drm_i915_cmd_table 
  #define I915_NEED_GFX_HWS(dev)        (INTEL_INFO(dev)->need_gfx_hws)
  
  #define HAS_HW_CONTEXTS(dev)  (INTEL_INFO(dev)->gen >= 6)
- #define HAS_ALIASING_PPGTT(dev)       (INTEL_INFO(dev)->gen >= 6 && \
-                                (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
- #define HAS_PPGTT(dev)                (INTEL_INFO(dev)->gen >= 7 \
-                                && !IS_GEN8(dev))
+ #define HAS_ALIASING_PPGTT(dev)       (INTEL_INFO(dev)->gen >= 6)
+ #define HAS_PPGTT(dev)                (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
  #define USES_PPGTT(dev)               intel_enable_ppgtt(dev, false)
  #define USES_FULL_PPGTT(dev)  intel_enable_ppgtt(dev, true)
  
  #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
  #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
  
+ #define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
  /* DPF == dynamic parity feature */
  #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
  #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
@@@ -2040,6 -2146,8 +2146,8 @@@ struct i915_params 
        bool reset;
        bool disable_display;
        bool disable_vtd_wa;
+       int use_mmio_flip;
+       bool mmio_debug;
  };
  extern struct i915_params i915 __read_mostly;
  
@@@ -2048,12 -2156,12 +2156,12 @@@ void i915_update_dri1_breadcrumb(struc
  extern void i915_kernel_lost_context(struct drm_device * dev);
  extern int i915_driver_load(struct drm_device *, unsigned long flags);
  extern int i915_driver_unload(struct drm_device *);
- extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
+ extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
  extern void i915_driver_lastclose(struct drm_device * dev);
  extern void i915_driver_preclose(struct drm_device *dev,
-                                struct drm_file *file_priv);
+                                struct drm_file *file);
  extern void i915_driver_postclose(struct drm_device *dev,
-                                 struct drm_file *file_priv);
+                                 struct drm_file *file);
  extern int i915_driver_device_is_agp(struct drm_device * dev);
  #ifdef CONFIG_COMPAT
  extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
@@@ -2084,10 -2192,12 +2192,12 @@@ extern void intel_irq_init(struct drm_d
  extern void intel_hpd_init(struct drm_device *dev);
  
  extern void intel_uncore_sanitize(struct drm_device *dev);
- extern void intel_uncore_early_sanitize(struct drm_device *dev);
+ extern void intel_uncore_early_sanitize(struct drm_device *dev,
+                                       bool restore_forcewake);
  extern void intel_uncore_init(struct drm_device *dev);
  extern void intel_uncore_check_errors(struct drm_device *dev);
  extern void intel_uncore_fini(struct drm_device *dev);
+ extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
  
  void
  i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@@ -2235,6 -2345,8 +2345,8 @@@ bool i915_gem_retire_requests(struct dr
  void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
  int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
+ int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
  static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
  {
        return unlikely(atomic_read(&error->reset_counter)
@@@ -2404,7 -2516,7 +2516,7 @@@ static inline void i915_gem_context_unr
  
  static inline bool i915_gem_context_is_default(const struct intel_context *c)
  {
-       return c->id == DEFAULT_CONTEXT_ID;
+       return c->user_handle == DEFAULT_CONTEXT_HANDLE;
  }
  
  int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@@ -2435,7 -2547,7 +2547,7 @@@ static inline void i915_gem_chipset_flu
  
  /* i915_gem_stolen.c */
  int i915_gem_init_stolen(struct drm_device *dev);
- int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
+ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
  void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
  void i915_gem_cleanup_stolen(struct drm_device *dev);
  struct drm_i915_gem_object *
@@@ -2445,7 -2557,6 +2557,6 @@@ i915_gem_object_create_stolen_for_preal
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size);
- void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
  
  /* i915_gem_tiling.c */
  static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@@ -2593,8 -2704,8 +2704,8 @@@ extern bool ironlake_set_drps(struct dr
  extern void intel_init_pch_refclk(struct drm_device *dev);
  extern void gen6_set_rps(struct drm_device *dev, u8 val);
  extern void valleyview_set_rps(struct drm_device *dev, u8 val);
- extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
- extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
+ extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
                                bool enable);
  extern void intel_detect_pch(struct drm_device *dev);
  extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
  extern int intel_enable_rc6(const struct drm_device *dev);
@@@ -2605,6 -2716,8 +2716,8 @@@ int i915_reg_read_ioctl(struct drm_devi
  int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file);
  
+ void intel_notify_mmio_flip(struct intel_engine_cs *ring);
  /* overlay */
  extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
  extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
@@@ -2700,10 -2813,10 +2813,10 @@@ int vlv_freq_opcode(struct drm_i915_pri
  
  static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
  {
-       if (HAS_PCH_SPLIT(dev))
-               return CPU_VGACNTRL;
-       else if (IS_VALLEYVIEW(dev))
+       if (IS_VALLEYVIEW(dev))
                return VLV_VGACNTRL;
+       else if (INTEL_INFO(dev)->gen >= 5)
+               return CPU_VGACNTRL;
        else
                return VGACNTRL;
  }
index f247d922e44a3dfaf1669e3a4d67814b82e62934,dcd8d7b42552a2449024419d949ec3adcf350ed1..215185050ff1113dd3861d62ca2d597e24b7bf3f
@@@ -1095,7 -1095,7 +1095,7 @@@ i915_gem_check_wedge(struct i915_gpu_er
   * Compare seqno against outstanding lazy request. Emit a request if they are
   * equal.
   */
- static int
+ int
  i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
  {
        int ret;
@@@ -1149,26 -1149,26 +1149,26 @@@ static bool can_wait_boost(struct drm_i
  static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
                        unsigned reset_counter,
                        bool interruptible,
 -                      struct timespec *timeout,
 +                      s64 *timeout,
                        struct drm_i915_file_private *file_priv)
  {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const bool irq_test_in_progress =
                ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
 -      struct timespec before, now;
        DEFINE_WAIT(wait);
        unsigned long timeout_expire;
 +      s64 before, now;
        int ret;
  
-       WARN(dev_priv->pm.irqs_disabled, "IRQs disabled\n");
+       WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
  
        if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
                return 0;
  
 -      timeout_expire = timeout ? jiffies + timespec_to_jiffies_timeout(timeout) : 0;
 +      timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
  
-       if (INTEL_INFO(dev)->gen >= 6 && can_wait_boost(file_priv)) {
+       if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
                gen6_rps_boost(dev_priv);
                if (file_priv)
                        mod_delayed_work(dev_priv->wq,
  
        /* Record current time in case interrupted by signal, or wedged */
        trace_i915_gem_request_wait_begin(ring, seqno);
 -      getrawmonotonic(&before);
 +      before = ktime_get_raw_ns();
        for (;;) {
                struct timer_list timer;
  
                        destroy_timer_on_stack(&timer);
                }
        }
 -      getrawmonotonic(&now);
 +      now = ktime_get_raw_ns();
        trace_i915_gem_request_wait_end(ring, seqno);
  
        if (!irq_test_in_progress)
        finish_wait(&ring->irq_queue, &wait);
  
        if (timeout) {
 -              struct timespec sleep_time = timespec_sub(now, before);
 -              *timeout = timespec_sub(*timeout, sleep_time);
 -              if (!timespec_valid(timeout)) /* i.e. negative time remains */
 -                      set_normalized_timespec(timeout, 0, 0);
 +              s64 tres = *timeout - (now - before);
 +
 +              *timeout = tres < 0 ? 0 : tres;
        }
  
        return ret;
@@@ -1560,14 -1561,29 +1560,29 @@@ int i915_gem_fault(struct vm_area_struc
        if (ret)
                goto unpin;
  
-       obj->fault_mappable = true;
+       /* Finally, remap it using the new GTT offset */
        pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
        pfn >>= PAGE_SHIFT;
-       pfn += page_offset;
  
-       /* Finally, remap it using the new GTT offset */
-       ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+       if (!obj->fault_mappable) {
+               unsigned long size = min_t(unsigned long,
+                                          vma->vm_end - vma->vm_start,
+                                          obj->base.size);
+               int i;
+               for (i = 0; i < size >> PAGE_SHIFT; i++) {
+                       ret = vm_insert_pfn(vma,
+                                           (unsigned long)vma->vm_start + i * PAGE_SIZE,
+                                           pfn + i);
+                       if (ret)
+                               break;
+               }
+               obj->fault_mappable = true;
+       } else
+               ret = vm_insert_pfn(vma,
+                                   (unsigned long)vmf->virtual_address,
+                                   pfn + page_offset);
  unpin:
        i915_gem_object_ggtt_unpin(obj);
  unlock:
@@@ -2051,16 -2067,10 +2066,10 @@@ i915_gem_object_get_pages_gtt(struct dr
                         * our own buffer, now let the real VM do its job and
                         * go down in flames if truly OOM.
                         */
-                       gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
-                       gfp |= __GFP_IO | __GFP_WAIT;
                        i915_gem_shrink_all(dev_priv);
-                       page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+                       page = shmem_read_mapping_page(mapping, i);
                        if (IS_ERR(page))
                                goto err_pages;
-                       gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
-                       gfp &= ~(__GFP_IO | __GFP_WAIT);
                }
  #ifdef CONFIG_SWIOTLB
                if (swiotlb_nr_tbl()) {
@@@ -2209,6 -2219,8 +2218,8 @@@ i915_gem_object_move_to_inactive(struc
                        list_move_tail(&vma->mm_list, &vm->inactive_list);
        }
  
+       intel_fb_obj_flush(obj, true);
        list_del_init(&obj->ring_list);
        obj->ring = NULL;
  
@@@ -2318,7 -2330,7 +2329,7 @@@ int __i915_add_request(struct intel_eng
        u32 request_ring_position, request_start;
        int ret;
  
-       request_start = intel_ring_get_tail(ring);
+       request_start = intel_ring_get_tail(ring->buffer);
        /*
         * Emit any outstanding flushes - execbuf can fail to emit the flush
         * after having emitted the batchbuffer command. Hence we need to fix
         * GPU processing the request, we never over-estimate the
         * position of the head.
         */
-       request_ring_position = intel_ring_get_tail(ring);
+       request_ring_position = intel_ring_get_tail(ring->buffer);
  
        ret = ring->add_request(ring);
        if (ret)
@@@ -2745,10 -2757,16 +2756,10 @@@ i915_gem_wait_ioctl(struct drm_device *
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
        struct intel_engine_cs *ring = NULL;
 -      struct timespec timeout_stack, *timeout = NULL;
        unsigned reset_counter;
        u32 seqno = 0;
        int ret = 0;
  
 -      if (args->timeout_ns >= 0) {
 -              timeout_stack = ns_to_timespec(args->timeout_ns);
 -              timeout = &timeout_stack;
 -      }
 -
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
                 goto out;
  
        /* Do this after OLR check to make sure we make forward progress polling
 -       * on this IOCTL with a 0 timeout (like busy ioctl)
 +       * on this IOCTL with a timeout <=0 (like busy ioctl)
         */
 -      if (!args->timeout_ns) {
 +      if (args->timeout_ns <= 0) {
                ret = -ETIME;
                goto out;
        }
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        mutex_unlock(&dev->struct_mutex);
  
 -      ret = __wait_seqno(ring, seqno, reset_counter, true, timeout, file->driver_priv);
 -      if (timeout)
 -              args->timeout_ns = timespec_to_ns(timeout);
 -      return ret;
 +      return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
 +                          file->driver_priv);
  
  out:
        drm_gem_object_unreference(&obj->base);
@@@ -2822,6 -2842,8 +2833,8 @@@ i915_gem_object_sync(struct drm_i915_ge
        idx = intel_ring_sync_index(from, to);
  
        seqno = obj->last_read_seqno;
+       /* Optimization: Avoid semaphore sync when we are sure we already
+        * waited for an object with higher seqno */
        if (seqno <= from->semaphore.sync_seqno[idx])
                return 0;
  
@@@ -2905,8 -2927,6 +2918,6 @@@ int i915_vma_unbind(struct i915_vma *vm
  
        vma->unbind_vma(vma);
  
-       i915_gem_gtt_finish_object(obj);
        list_del_init(&vma->mm_list);
        /* Avoid an unnecessary call to unbind on rebind. */
        if (i915_is_ggtt(vma->vm))
  
        /* Since the unbound list is global, only move to that list if
         * no more VMAs exist. */
-       if (list_empty(&obj->vma_list))
+       if (list_empty(&obj->vma_list)) {
+               i915_gem_gtt_finish_object(obj);
                list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+       }
  
        /* And finally now the object is completely decoupled from this vma,
         * we can drop its hold on the backing storage and allow it to be
@@@ -3530,6 -3552,8 +3543,8 @@@ i915_gem_object_flush_gtt_write_domain(
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
  
+       intel_fb_obj_flush(obj, false);
        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
                                            old_write_domain);
@@@ -3551,6 -3575,8 +3566,8 @@@ i915_gem_object_flush_cpu_write_domain(
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
  
+       intel_fb_obj_flush(obj, false);
        trace_i915_gem_object_change_domain(obj,
                                            obj->base.read_domains,
                                            old_write_domain);
@@@ -3604,6 -3630,9 +3621,9 @@@ i915_gem_object_set_to_gtt_domain(struc
                obj->dirty = 1;
        }
  
+       if (write)
+               intel_fb_obj_invalidate(obj, NULL);
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);
@@@ -3940,6 -3969,9 +3960,9 @@@ i915_gem_object_set_to_cpu_domain(struc
                obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
  
+       if (write)
+               intel_fb_obj_invalidate(obj, NULL);
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
                                            old_write_domain);
@@@ -4428,13 -4460,14 +4451,14 @@@ void i915_gem_free_object(struct drm_ge
        if (obj->stolen)
                i915_gem_object_unpin_pages(obj);
  
+       WARN_ON(obj->frontbuffer_bits);
        if (WARN_ON(obj->pages_pin_count))
                obj->pages_pin_count = 0;
        if (discard_backing_storage(obj))
                obj->madv = I915_MADV_DONTNEED;
        i915_gem_object_put_pages(obj);
        i915_gem_object_free_mmap_offset(obj);
-       i915_gem_object_release_stolen(obj);
  
        BUG_ON(obj->pages);
  
@@@ -4912,6 -4945,8 +4936,8 @@@ i915_gem_load(struct drm_device *dev
  
        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        register_oom_notifier(&dev_priv->mm.oom_notifier);
+       mutex_init(&dev_priv->fb_tracking.lock);
  }
  
  void i915_gem_release(struct drm_device *dev, struct drm_file *file)
@@@ -4973,6 -5008,23 +4999,23 @@@ int i915_gem_open(struct drm_device *de
        return ret;
  }
  
+ void i915_gem_track_fb(struct drm_i915_gem_object *old,
+                      struct drm_i915_gem_object *new,
+                      unsigned frontbuffer_bits)
+ {
+       if (old) {
+               WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
+               WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
+               old->frontbuffer_bits &= ~frontbuffer_bits;
+       }
+       if (new) {
+               WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
+               WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
+               new->frontbuffer_bits |= frontbuffer_bits;
+       }
+ }
  static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
  {
        if (!mutex_is_locked(mutex))
@@@ -5055,12 -5107,13 +5098,13 @@@ unsigned long i915_gem_obj_offset(struc
            vm == &dev_priv->mm.aliasing_ppgtt->base)
                vm = &dev_priv->gtt.base;
  
-       BUG_ON(list_empty(&o->vma_list));
        list_for_each_entry(vma, &o->vma_list, vma_link) {
                if (vma->vm == vm)
                        return vma->node.start;
  
        }
+       WARN(1, "%s vma for this object not found.\n",
+            i915_is_ggtt(vm) ? "global" : "ppgtt");
        return -1;
  }
  
@@@ -5141,8 -5194,11 +5185,11 @@@ i915_gem_shrinker_oom(struct notifier_b
        bool was_interruptible;
        bool unlock;
  
-       while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout)
+       while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
                schedule_timeout_killable(1);
+               if (fatal_signal_pending(current))
+                       return NOTIFY_DONE;
+       }
        if (timeout == 0) {
                pr_err("Unable to purge GPU memory due lock contention.\n");
                return NOTIFY_DONE;
index f1233f544f3ee7d5dbb77891a55d58f0c3c77788,3f88f29a98c0af3b819118b596ffc94b81f1d5dc..c3bb925b2e65396d278949a13360458c93bd94a3
@@@ -93,8 -93,7 +93,7 @@@ static void i8xx_enable_fbc(struct drm_
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int cfb_pitch;
        int i;
@@@ -150,8 -149,7 +149,7 @@@ static void g4x_enable_fbc(struct drm_c
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
  
@@@ -222,16 -220,26 +220,26 @@@ static void ironlake_enable_fbc(struct 
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
  
        dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+               dev_priv->fbc.threshold++;
+       switch (dev_priv->fbc.threshold) {
+       case 4:
+       case 3:
+               dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+               break;
+       case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-       else
+               break;
+       case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+               break;
+       }
        dpfc_ctl |= DPFC_CTL_FENCE_EN;
        if (IS_GEN5(dev))
                dpfc_ctl |= obj->fence_reg;
@@@ -278,16 -286,27 +286,27 @@@ static void gen7_enable_fbc(struct drm_
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->primary->fb;
-       struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        u32 dpfc_ctl;
  
        dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
        if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+               dev_priv->fbc.threshold++;
+       switch (dev_priv->fbc.threshold) {
+       case 4:
+       case 3:
+               dpfc_ctl |= DPFC_CTL_LIMIT_4X;
+               break;
+       case 2:
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
-       else
+               break;
+       case 1:
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
+               break;
+       }
        dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
  
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@@ -462,7 -481,6 +481,6 @@@ void intel_update_fbc(struct drm_devic
        struct drm_crtc *crtc = NULL, *tmp_crtc;
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb;
-       struct intel_framebuffer *intel_fb;
        struct drm_i915_gem_object *obj;
        const struct drm_display_mode *adjusted_mode;
        unsigned int max_width, max_height;
  
        intel_crtc = to_intel_crtc(crtc);
        fb = crtc->primary->fb;
-       intel_fb = to_intel_framebuffer(fb);
-       obj = intel_fb->obj;
+       obj = intel_fb_obj(fb);
        adjusted_mode = &intel_crtc->config.adjusted_mode;
  
        if (i915.enable_fbc < 0) {
                goto out_disable;
        }
  
-       if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
+       if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
+               max_width = 4096;
+               max_height = 4096;
+       } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
                max_width = 4096;
                max_height = 2048;
        } else {
        if (in_dbg_master())
                goto out_disable;
  
-       if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
+       if (i915_gem_stolen_setup_compression(dev, obj->base.size,
+                                             drm_format_plane_cpp(fb->pixel_format, 0))) {
                if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
                        DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
                goto out_disable;
@@@ -789,12 -810,33 +810,33 @@@ static const struct cxsr_latency *intel
        return NULL;
  }
  
- static void pineview_disable_cxsr(struct drm_device *dev)
+ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
  {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_device *dev = dev_priv->dev;
+       u32 val;
+       if (IS_VALLEYVIEW(dev)) {
+               I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
+       } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
+               I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
+       } else if (IS_PINEVIEW(dev)) {
+               val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
+               val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
+               I915_WRITE(DSPFW3, val);
+       } else if (IS_I945G(dev) || IS_I945GM(dev)) {
+               val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
+                              _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+               I915_WRITE(FW_BLC_SELF, val);
+       } else if (IS_I915GM(dev)) {
+               val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
+                              _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+               I915_WRITE(INSTPM, val);
+       } else {
+               return;
+       }
  
-       /* deactivate cxsr */
-       I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
+       DRM_DEBUG_KMS("memory self-refresh is %s\n",
+                     enable ? "enabled" : "disabled");
  }
  
  /*
@@@ -864,95 -906,95 +906,95 @@@ static int i845_get_fifo_size(struct dr
  
  /* Pineview has different values for various configs */
  static const struct intel_watermark_params pineview_display_wm = {
-       PINEVIEW_DISPLAY_FIFO,
-       PINEVIEW_MAX_WM,
-       PINEVIEW_DFT_WM,
-       PINEVIEW_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE
+       .fifo_size = PINEVIEW_DISPLAY_FIFO,
+       .max_wm = PINEVIEW_MAX_WM,
+       .default_wm = PINEVIEW_DFT_WM,
+       .guard_size = PINEVIEW_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params pineview_display_hplloff_wm = {
-       PINEVIEW_DISPLAY_FIFO,
-       PINEVIEW_MAX_WM,
-       PINEVIEW_DFT_HPLLOFF_WM,
-       PINEVIEW_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE
+       .fifo_size = PINEVIEW_DISPLAY_FIFO,
+       .max_wm = PINEVIEW_MAX_WM,
+       .default_wm = PINEVIEW_DFT_HPLLOFF_WM,
+       .guard_size = PINEVIEW_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params pineview_cursor_wm = {
-       PINEVIEW_CURSOR_FIFO,
-       PINEVIEW_CURSOR_MAX_WM,
-       PINEVIEW_CURSOR_DFT_WM,
-       PINEVIEW_CURSOR_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE,
+       .fifo_size = PINEVIEW_CURSOR_FIFO,
+       .max_wm = PINEVIEW_CURSOR_MAX_WM,
+       .default_wm = PINEVIEW_CURSOR_DFT_WM,
+       .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
-       PINEVIEW_CURSOR_FIFO,
-       PINEVIEW_CURSOR_MAX_WM,
-       PINEVIEW_CURSOR_DFT_WM,
-       PINEVIEW_CURSOR_GUARD_WM,
-       PINEVIEW_FIFO_LINE_SIZE
+       .fifo_size = PINEVIEW_CURSOR_FIFO,
+       .max_wm = PINEVIEW_CURSOR_MAX_WM,
+       .default_wm = PINEVIEW_CURSOR_DFT_WM,
+       .guard_size = PINEVIEW_CURSOR_GUARD_WM,
+       .cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params g4x_wm_info = {
-       G4X_FIFO_SIZE,
-       G4X_MAX_WM,
-       G4X_MAX_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = G4X_FIFO_SIZE,
+       .max_wm = G4X_MAX_WM,
+       .default_wm = G4X_MAX_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params g4x_cursor_wm_info = {
-       I965_CURSOR_FIFO,
-       I965_CURSOR_MAX_WM,
-       I965_CURSOR_DFT_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = I965_CURSOR_FIFO,
+       .max_wm = I965_CURSOR_MAX_WM,
+       .default_wm = I965_CURSOR_DFT_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params valleyview_wm_info = {
-       VALLEYVIEW_FIFO_SIZE,
-       VALLEYVIEW_MAX_WM,
-       VALLEYVIEW_MAX_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = VALLEYVIEW_FIFO_SIZE,
+       .max_wm = VALLEYVIEW_MAX_WM,
+       .default_wm = VALLEYVIEW_MAX_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params valleyview_cursor_wm_info = {
-       I965_CURSOR_FIFO,
-       VALLEYVIEW_CURSOR_MAX_WM,
-       I965_CURSOR_DFT_WM,
-       2,
-       G4X_FIFO_LINE_SIZE,
+       .fifo_size = I965_CURSOR_FIFO,
+       .max_wm = VALLEYVIEW_CURSOR_MAX_WM,
+       .default_wm = I965_CURSOR_DFT_WM,
+       .guard_size = 2,
+       .cacheline_size = G4X_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params i965_cursor_wm_info = {
-       I965_CURSOR_FIFO,
-       I965_CURSOR_MAX_WM,
-       I965_CURSOR_DFT_WM,
-       2,
-       I915_FIFO_LINE_SIZE,
+       .fifo_size = I965_CURSOR_FIFO,
+       .max_wm = I965_CURSOR_MAX_WM,
+       .default_wm = I965_CURSOR_DFT_WM,
+       .guard_size = 2,
+       .cacheline_size = I915_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params i945_wm_info = {
-       I945_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I915_FIFO_LINE_SIZE
+       .fifo_size = I945_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I915_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params i915_wm_info = {
-       I915_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I915_FIFO_LINE_SIZE
+       .fifo_size = I915_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I915_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params i830_wm_info = {
-       I855GM_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I830_FIFO_LINE_SIZE
+       .fifo_size = I855GM_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I830_FIFO_LINE_SIZE,
  };
  static const struct intel_watermark_params i845_wm_info = {
-       I830_FIFO_SIZE,
-       I915_MAX_WM,
-       1,
-       2,
-       I830_FIFO_LINE_SIZE
+       .fifo_size = I830_FIFO_SIZE,
+       .max_wm = I915_MAX_WM,
+       .default_wm = 1,
+       .guard_size = 2,
+       .cacheline_size = I830_FIFO_LINE_SIZE,
  };
  
  /**
@@@ -1033,7 -1075,7 +1075,7 @@@ static void pineview_update_wm(struct d
                                         dev_priv->fsb_freq, dev_priv->mem_freq);
        if (!latency) {
                DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
-               pineview_disable_cxsr(dev);
+               intel_set_memory_cxsr(dev_priv, false);
                return;
        }
  
                I915_WRITE(DSPFW3, reg);
                DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
  
-               /* activate cxsr */
-               I915_WRITE(DSPFW3,
-                          I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
-               DRM_DEBUG_KMS("Self-refresh is enabled\n");
+               intel_set_memory_cxsr(dev_priv, true);
        } else {
-               pineview_disable_cxsr(dev);
-               DRM_DEBUG_KMS("Self-refresh is disabled\n");
+               intel_set_memory_cxsr(dev_priv, false);
        }
  }
  
@@@ -1316,6 -1354,7 +1354,7 @@@ static void valleyview_update_wm(struc
        int plane_sr, cursor_sr;
        int ignore_plane_sr, ignore_cursor_sr;
        unsigned int enabled = 0;
+       bool cxsr_enabled;
  
        vlv_update_drain_latency(dev);
  
                             &valleyview_wm_info,
                             &valleyview_cursor_wm_info,
                             &ignore_plane_sr, &cursor_sr)) {
-               I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
+               cxsr_enabled = true;
        } else {
-               I915_WRITE(FW_BLC_SELF_VLV,
-                          I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
+               cxsr_enabled = false;
+               intel_set_memory_cxsr(dev_priv, false);
                plane_sr = cursor_sr = 0;
        }
  
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+       if (cxsr_enabled)
+               intel_set_memory_cxsr(dev_priv, true);
  }
  
  static void g4x_update_wm(struct drm_crtc *crtc)
        int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
        int plane_sr, cursor_sr;
        unsigned int enabled = 0;
+       bool cxsr_enabled;
  
        if (g4x_compute_wm0(dev, PIPE_A,
                            &g4x_wm_info, latency_ns,
                             &g4x_wm_info,
                             &g4x_cursor_wm_info,
                             &plane_sr, &cursor_sr)) {
-               I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+               cxsr_enabled = true;
        } else {
-               I915_WRITE(FW_BLC_SELF,
-                          I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
+               cxsr_enabled = false;
+               intel_set_memory_cxsr(dev_priv, false);
                plane_sr = cursor_sr = 0;
        }
  
        I915_WRITE(DSPFW3,
                   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
                   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+       if (cxsr_enabled)
+               intel_set_memory_cxsr(dev_priv, true);
  }
  
  static void i965_update_wm(struct drm_crtc *unused_crtc)
        struct drm_crtc *crtc;
        int srwm = 1;
        int cursor_sr = 16;
+       bool cxsr_enabled;
  
        /* Calc sr entries for one plane configs */
        crtc = single_enabled_crtc(dev);
                DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
                              "cursor %d\n", srwm, cursor_sr);
  
-               if (IS_CRESTLINE(dev))
-                       I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
+               cxsr_enabled = true;
        } else {
+               cxsr_enabled = false;
                /* Turn off self refresh if both pipes are enabled */
-               if (IS_CRESTLINE(dev))
-                       I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
-                                  & ~FW_BLC_SELF_EN);
+               intel_set_memory_cxsr(dev_priv, false);
        }
  
        DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
        I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
        /* update cursor SR watermark */
        I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
+       if (cxsr_enabled)
+               intel_set_memory_cxsr(dev_priv, true);
  }
  
  static void i9xx_update_wm(struct drm_crtc *unused_crtc)
        DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
  
        if (IS_I915GM(dev) && enabled) {
-               struct intel_framebuffer *fb;
+               struct drm_i915_gem_object *obj;
  
-               fb = to_intel_framebuffer(enabled->primary->fb);
+               obj = intel_fb_obj(enabled->primary->fb);
  
                /* self-refresh seems busted with untiled */
-               if (fb->obj->tiling_mode == I915_TILING_NONE)
+               if (obj->tiling_mode == I915_TILING_NONE)
                        enabled = NULL;
        }
  
        cwm = 2;
  
        /* Play safe and disable self-refresh before adjusting watermarks. */
-       if (IS_I945G(dev) || IS_I945GM(dev))
-               I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
-       else if (IS_I915GM(dev))
-               I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));
+       intel_set_memory_cxsr(dev_priv, false);
  
        /* Calc sr entries for one plane configs */
        if (HAS_FW_BLC(dev) && enabled) {
        I915_WRITE(FW_BLC, fwater_lo);
        I915_WRITE(FW_BLC2, fwater_hi);
  
-       if (HAS_FW_BLC(dev)) {
-               if (enabled) {
-                       if (IS_I945G(dev) || IS_I945GM(dev))
-                               I915_WRITE(FW_BLC_SELF,
-                                          FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
-                       else if (IS_I915GM(dev))
-                               I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
-                       DRM_DEBUG_KMS("memory self refresh enabled\n");
-               } else
-                       DRM_DEBUG_KMS("memory self refresh disabled\n");
-       }
+       if (enabled)
+               intel_set_memory_cxsr(dev_priv, true);
  }
  
  static void i845_update_wm(struct drm_crtc *unused_crtc)
@@@ -2707,10 -2743,11 +2743,11 @@@ static void ilk_update_wm(struct drm_cr
        ilk_write_wm_values(dev_priv, &results);
  }
  
- static void ilk_update_sprite_wm(struct drm_plane *plane,
-                                    struct drm_crtc *crtc,
-                                    uint32_t sprite_width, int pixel_size,
-                                    bool enabled, bool scaled)
+ static void
+ ilk_update_sprite_wm(struct drm_plane *plane,
+                    struct drm_crtc *crtc,
+                    uint32_t sprite_width, uint32_t sprite_height,
+                    int pixel_size, bool enabled, bool scaled)
  {
        struct drm_device *dev = plane->dev;
        struct intel_plane *intel_plane = to_intel_plane(plane);
        intel_plane->wm.enabled = enabled;
        intel_plane->wm.scaled = scaled;
        intel_plane->wm.horiz_pixels = sprite_width;
+       intel_plane->wm.vert_pixels = sprite_width;
        intel_plane->wm.bytes_per_pixel = pixel_size;
  
        /*
@@@ -2852,13 -2890,16 +2890,16 @@@ void intel_update_watermarks(struct drm
  
  void intel_update_sprite_watermarks(struct drm_plane *plane,
                                    struct drm_crtc *crtc,
-                                   uint32_t sprite_width, int pixel_size,
+                                   uint32_t sprite_width,
+                                   uint32_t sprite_height,
+                                   int pixel_size,
                                    bool enabled, bool scaled)
  {
        struct drm_i915_private *dev_priv = plane->dev->dev_private;
  
        if (dev_priv->display.update_sprite_wm)
-               dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+               dev_priv->display.update_sprite_wm(plane, crtc,
+                                                  sprite_width, sprite_height,
                                                   pixel_size, enabled, scaled);
  }
  
@@@ -2993,7 -3034,7 +3034,7 @@@ static void ironlake_enable_drps(struc
                I915_READ(0x112e0);
        dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
        dev_priv->ips.last_count2 = I915_READ(0x112f4);
 -      getrawmonotonic(&dev_priv->ips.last_time2);
 +      dev_priv->ips.last_time2 = ktime_get_raw_ns();
  
        spin_unlock_irq(&mchdev_lock);
  }
@@@ -3147,6 -3188,9 +3188,9 @@@ static u32 gen6_rps_pm_mask(struct drm_
        if (val < dev_priv->rps.max_freq_softlimit)
                mask |= GEN6_PM_RP_UP_THRESHOLD;
  
+       mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
+       mask &= dev_priv->pm_rps_events;
        /* IVB and SNB hard hangs on looping batchbuffer
         * if GEN6_PM_UP_EI_EXPIRED is masked.
         */
@@@ -3250,7 -3294,9 +3294,9 @@@ void gen6_rps_idle(struct drm_i915_priv
  
        mutex_lock(&dev_priv->rps.hw_lock);
        if (dev_priv->rps.enabled) {
-               if (IS_VALLEYVIEW(dev))
+               if (IS_CHERRYVIEW(dev))
+                       valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+               else if (IS_VALLEYVIEW(dev))
                        vlv_set_rps_idle(dev_priv);
                else
                        gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
@@@ -3348,6 -3394,15 +3394,15 @@@ static void gen6_disable_rps(struct drm
                gen6_disable_rps_interrupts(dev);
  }
  
+ static void cherryview_disable_rps(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+       gen8_disable_rps_interrupts(dev);
+ }
  static void valleyview_disable_rps(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -3419,7 -3474,7 +3474,7 @@@ static void gen8_enable_rps_interrupts(
  
        spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON(dev_priv->rps.pm_iir);
-       bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+       gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
  }
@@@ -3430,7 -3485,7 +3485,7 @@@ static void gen6_enable_rps_interrupts(
  
        spin_lock_irq(&dev_priv->irq_lock);
        WARN_ON(dev_priv->rps.pm_iir);
-       snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+       gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
        I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
        spin_unlock_irq(&dev_priv->irq_lock);
  }
@@@ -3483,15 -3538,23 +3538,23 @@@ static void gen8_enable_rps(struct drm_
        for_each_ring(ring, dev_priv, unused)
                I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
        I915_WRITE(GEN6_RC_SLEEP, 0);
-       I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+       if (IS_BROADWELL(dev))
+               I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
+       else
+               I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
  
        /* 3: Enable RC6 */
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
                rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
        intel_print_rc6_info(dev, rc6_mask);
-       I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
-                                   GEN6_RC_CTL_EI_MODE(1) |
-                                   rc6_mask);
+       if (IS_BROADWELL(dev))
+               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+                               GEN7_RC_CTL_TO_MODE |
+                               rc6_mask);
+       else
+               I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+                               GEN6_RC_CTL_EI_MODE(1) |
+                               rc6_mask);
  
        /* 4 Program defaults and thresholds for RPS*/
        I915_WRITE(GEN6_RPNSWREQ,
@@@ -3727,7 -3790,57 +3790,57 @@@ void gen6_update_ring_freq(struct drm_d
        mutex_unlock(&dev_priv->rps.hw_lock);
  }
  
- int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
+ static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
+ {
+       u32 val, rp0;
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+       rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+       return rp0;
+ }
+ static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
+ {
+       u32 val, rpe;
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
+       rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
+       return rpe;
+ }
+ static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
+ {
+       u32 val, rp1;
+       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+       rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
+       return rp1;
+ }
+ static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+ {
+       u32 val, rpn;
+       val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
+       rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
+       return rpn;
+ }
+ static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
+ {
+       u32 val, rp1;
+       val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
+       rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
+       return rp1;
+ }
+ static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
  {
        u32 val, rp0;
  
@@@ -3752,7 -3865,7 +3865,7 @@@ static int valleyview_rps_rpe_freq(stru
        return rpe;
  }
  
- int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
  {
        return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
  }
@@@ -3766,6 -3879,35 +3879,35 @@@ static void valleyview_check_pctx(struc
                             dev_priv->vlv_pctx->stolen->start);
  }
  
+ /* Check that the pcbr address is not empty. */
+ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
+ {
+       unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
+       WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
+ }
+ static void cherryview_setup_pctx(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned long pctx_paddr, paddr;
+       struct i915_gtt *gtt = &dev_priv->gtt;
+       u32 pcbr;
+       int pctx_size = 32*1024;
+       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       pcbr = I915_READ(VLV_PCBR);
+       if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+               paddr = (dev_priv->mm.stolen_base +
+                        (gtt->stolen_size - pctx_size));
+               pctx_paddr = (paddr & (~4095));
+               I915_WRITE(VLV_PCBR, pctx_paddr);
+       }
+ }
  static void valleyview_setup_pctx(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@@ -3840,6 -3982,11 +3982,11 @@@ static void valleyview_init_gt_powersav
                         vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
                         dev_priv->rps.efficient_freq);
  
+       dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
+       DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+                        dev_priv->rps.rp1_freq);
        dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
        DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
                         vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
        mutex_unlock(&dev_priv->rps.hw_lock);
  }
  
+ static void cherryview_init_gt_powersave(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       cherryview_setup_pctx(dev);
+       mutex_lock(&dev_priv->rps.hw_lock);
+       dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
+       dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
+       DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
+                        dev_priv->rps.max_freq);
+       dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
+       DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
+       dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
+       DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
+                        dev_priv->rps.rp1_freq);
+       dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
+       DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
+                        dev_priv->rps.min_freq);
+       /* Preserve min/max settings in case of re-init */
+       if (dev_priv->rps.max_freq_softlimit == 0)
+               dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
+       if (dev_priv->rps.min_freq_softlimit == 0)
+               dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+       mutex_unlock(&dev_priv->rps.hw_lock);
+ }
  static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
  {
        valleyview_cleanup_pctx(dev);
  }
  
+ static void cherryview_enable_rps(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring;
+       u32 gtfifodbg, val, rc6_mode = 0, pcbr;
+       int i;
+       WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+       gtfifodbg = I915_READ(GTFIFODBG);
+       if (gtfifodbg) {
+               DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
+                                gtfifodbg);
+               I915_WRITE(GTFIFODBG, gtfifodbg);
+       }
+       cherryview_check_pctx(dev_priv);
+       /* 1a & 1b: Get forcewake during program sequence. Although the driver
+        * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+       /* 2a: Program RC6 thresholds.*/
+       I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
+       I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+       I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+       for_each_ring(ring, dev_priv, i)
+               I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+       I915_WRITE(GEN6_RC_SLEEP, 0);
+       I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+       /* allows RC6 residency counter to work */
+       I915_WRITE(VLV_COUNTER_CONTROL,
+                  _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+                                     VLV_MEDIA_RC6_COUNT_EN |
+                                     VLV_RENDER_RC6_COUNT_EN));
+       /* For now we assume BIOS is allocating and populating the PCBR  */
+       pcbr = I915_READ(VLV_PCBR);
+       DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
+       /* 3: Enable RC6 */
+       if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
+                                               (pcbr >> VLV_PCBR_ADDR_SHIFT))
+               rc6_mode = GEN6_RC_CTL_EI_MODE(1);
+       I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+       /* 4 Program defaults and thresholds for RPS*/
+       I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
+       I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
+       I915_WRITE(GEN6_RP_UP_EI, 66000);
+       I915_WRITE(GEN6_RP_DOWN_EI, 350000);
+       I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+       /* WaDisablePwrmtrEvent:chv (pre-production hw) */
+       I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
+       I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
+       /* 5: Enable RPS */
+       I915_WRITE(GEN6_RP_CONTROL,
+                  GEN6_RP_MEDIA_HW_NORMAL_MODE |
+                  GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+                  GEN6_RP_ENABLE |
+                  GEN6_RP_UP_BUSY_AVG |
+                  GEN6_RP_DOWN_IDLE_AVG);
+       val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
+       DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+       DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
+       dev_priv->rps.cur_freq = (val >> 8) & 0xff;
+       DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
+                        dev_priv->rps.cur_freq);
+       DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
+                        vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
+                        dev_priv->rps.efficient_freq);
+       valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
+       gen8_enable_rps_interrupts(dev);
+       gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+ }
  static void valleyview_enable_rps(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        I915_WRITE(GEN6_RP_DOWN_EI, 350000);
  
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+       I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
  
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_TURBO |
  
        /* allows RC6 residency counter to work */
        I915_WRITE(VLV_COUNTER_CONTROL,
-                  _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+                  _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+                                     VLV_RENDER_RC0_COUNT_EN |
                                      VLV_MEDIA_RC6_COUNT_EN |
                                      VLV_RENDER_RC6_COUNT_EN));
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
                rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
  
@@@ -4314,16 -4595,18 +4595,16 @@@ static u16 pvid_to_extvid(struct drm_i9
  
  static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
  {
 -      struct timespec now, diff1;
 -      u64 diff;
 -      unsigned long diffms;
 +      u64 now, diff, diffms;
        u32 count;
  
        assert_spin_locked(&mchdev_lock);
  
 -      getrawmonotonic(&now);
 -      diff1 = timespec_sub(now, dev_priv->ips.last_time2);
 +      now = ktime_get_raw_ns();
 +      diffms = now - dev_priv->ips.last_time2;
 +      do_div(diffms, NSEC_PER_MSEC);
  
        /* Don't divide by 0 */
 -      diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
        if (!diffms)
                return;
  
@@@ -4666,33 -4949,60 +4947,60 @@@ void intel_init_gt_powersave(struct drm
  {
        i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
  
-       if (IS_VALLEYVIEW(dev))
+       if (IS_CHERRYVIEW(dev))
+               cherryview_init_gt_powersave(dev);
+       else if (IS_VALLEYVIEW(dev))
                valleyview_init_gt_powersave(dev);
  }
  
  void intel_cleanup_gt_powersave(struct drm_device *dev)
  {
-       if (IS_VALLEYVIEW(dev))
+       if (IS_CHERRYVIEW(dev))
+               return;
+       else if (IS_VALLEYVIEW(dev))
                valleyview_cleanup_gt_powersave(dev);
  }
  
+ /**
+  * intel_suspend_gt_powersave - suspend PM work and helper threads
+  * @dev: drm device
+  *
+  * We don't want to disable RC6 or other features here, we just want
+  * to make sure any work we've queued has finished and won't bother
+  * us while we're suspended.
+  */
+ void intel_suspend_gt_powersave(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       /* Interrupts should be disabled already to avoid re-arming. */
+       WARN_ON(intel_irqs_enabled(dev_priv));
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+       cancel_work_sync(&dev_priv->rps.work);
+       /* Force GPU to min freq during suspend */
+       gen6_rps_idle(dev_priv);
+ }
  void intel_disable_gt_powersave(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
  
        /* Interrupts should be disabled already to avoid re-arming. */
-       WARN_ON(dev->irq_enabled);
+       WARN_ON(intel_irqs_enabled(dev_priv));
  
        if (IS_IRONLAKE_M(dev)) {
                ironlake_disable_drps(dev);
                ironlake_disable_rc6(dev);
-       } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
-               if (cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work))
-                       intel_runtime_pm_put(dev_priv);
+       } else if (INTEL_INFO(dev)->gen >= 6) {
+               intel_suspend_gt_powersave(dev);
  
-               cancel_work_sync(&dev_priv->rps.work);
                mutex_lock(&dev_priv->rps.hw_lock);
-               if (IS_VALLEYVIEW(dev))
+               if (IS_CHERRYVIEW(dev))
+                       cherryview_disable_rps(dev);
+               else if (IS_VALLEYVIEW(dev))
                        valleyview_disable_rps(dev);
                else
                        gen6_disable_rps(dev);
@@@ -4710,7 -5020,9 +5018,9 @@@ static void intel_gen6_powersave_work(s
  
        mutex_lock(&dev_priv->rps.hw_lock);
  
-       if (IS_VALLEYVIEW(dev)) {
+       if (IS_CHERRYVIEW(dev)) {
+               cherryview_enable_rps(dev);
+       } else if (IS_VALLEYVIEW(dev)) {
                valleyview_enable_rps(dev);
        } else if (IS_BROADWELL(dev)) {
                gen8_enable_rps(dev);
@@@ -4735,7 -5047,7 +5045,7 @@@ void intel_enable_gt_powersave(struct d
                ironlake_enable_rc6(dev);
                intel_init_emon(dev);
                mutex_unlock(&dev->struct_mutex);
-       } else if (IS_GEN6(dev) || IS_GEN7(dev) || IS_BROADWELL(dev)) {
+       } else if (INTEL_INFO(dev)->gen >= 6) {
                /*
                 * PCU communication is slow and this doesn't need to be
                 * done at any specific time, so do this out of our fast path
@@@ -5108,7 -5420,7 +5418,7 @@@ static void gen8_init_clock_gating(stru
        I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
  
        I915_WRITE(_3D_CHICKEN3,
-                  _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
+                  _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
  
        I915_WRITE(COMMON_SLICE_CHICKEN2,
                   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
@@@ -5343,10 -5655,6 +5653,6 @@@ static void valleyview_init_clock_gatin
        }
        DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
  
-       dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
-       DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
-                        dev_priv->vlv_cdclk_freq);
        I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
  
        /* WaDisableEarlyCull:vlv */
  static void cherryview_init_clock_gating(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 val;
+       mutex_lock(&dev_priv->rps.hw_lock);
+       val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+       mutex_unlock(&dev_priv->rps.hw_lock);
+       switch ((val >> 2) & 0x7) {
+       case 0:
+       case 1:
+                       dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_200;
+                       dev_priv->mem_freq = 1600;
+                       break;
+       case 2:
+                       dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_267;
+                       dev_priv->mem_freq = 1600;
+                       break;
+       case 3:
+                       dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_333;
+                       dev_priv->mem_freq = 2000;
+                       break;
+       case 4:
+                       dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_320;
+                       dev_priv->mem_freq = 1600;
+                       break;
+       case 5:
+                       dev_priv->rps.cz_freq = CHV_CZ_CLOCK_FREQ_MODE_400;
+                       dev_priv->mem_freq = 1600;
+                       break;
+       }
+       DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
  
        I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
  
@@@ -5661,7 -5998,6 +5996,6 @@@ bool intel_display_power_enabled(struc
  static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
  {
        struct drm_device *dev = dev_priv->dev;
-       unsigned long irqflags;
  
        /*
         * After we re-enable the power well, if we touch VGA register 0x3d5
        outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
        vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
  
-       if (IS_BROADWELL(dev)) {
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-               I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
-                          dev_priv->de_irq_mask[PIPE_B]);
-               I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
-                          ~dev_priv->de_irq_mask[PIPE_B] |
-                          GEN8_PIPE_VBLANK);
-               I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
-                          dev_priv->de_irq_mask[PIPE_C]);
-               I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
-                          ~dev_priv->de_irq_mask[PIPE_C] |
-                          GEN8_PIPE_VBLANK);
-               POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-       }
+       if (IS_BROADWELL(dev))
+               gen8_irq_power_well_post_enable(dev_priv);
  }
  
  static void hsw_set_power_well(struct drm_i915_private *dev_priv,
@@@ -5762,34 -6085,13 +6083,13 @@@ static bool i9xx_always_on_power_well_e
        return true;
  }
  
void __vlv_set_power_well(struct drm_i915_private *dev_priv,
-                         enum punit_power_well power_well_id, bool enable)
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
+                              struct i915_power_well *power_well, bool enable)
  {
-       struct drm_device *dev = dev_priv->dev;
+       enum punit_power_well power_well_id = power_well->data;
        u32 mask;
        u32 state;
        u32 ctrl;
-       enum pipe pipe;
-       if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
-               if (enable) {
-                       /*
-                        * Enable the CRI clock source so we can get at the
-                        * display and the reference clock for VGA
-                        * hotplug / manual detection.
-                        */
-                       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
-                                  DPLL_REFA_CLK_ENABLE_VLV |
-                                  DPLL_INTEGRATED_CRI_CLK_VLV);
-                       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
-               } else {
-                       for_each_pipe(pipe)
-                               assert_pll_disabled(dev_priv, pipe);
-                       /* Assert common reset */
-                       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
-                                  ~DPIO_CMNRST);
-               }
-       }
  
        mask = PUNIT_PWRGT_MASK(power_well_id);
        state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
  
  out:
        mutex_unlock(&dev_priv->rps.hw_lock);
-       /*
-        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
-        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
-        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
-        *   b. The other bits such as sfr settings / modesel may all
-        *      be set to 0.
-        *
-        * This should only be done on init and resume from S3 with
-        * both PLLs disabled, or we risk losing DPIO and PLL
-        * synchronization.
-        */
-       if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
-               I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
- }
- static void vlv_set_power_well(struct drm_i915_private *dev_priv,
-                              struct i915_power_well *power_well, bool enable)
- {
-       enum punit_power_well power_well_id = power_well->data;
-       __vlv_set_power_well(dev_priv, power_well_id, enable);
  }
  
  static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@@ -5930,6 -6210,53 +6208,53 @@@ static void vlv_display_power_well_disa
        vlv_set_power_well(dev_priv, power_well, false);
  }
  
+ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
+                                          struct i915_power_well *power_well)
+ {
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+       /*
+        * Enable the CRI clock source so we can get at the
+        * display and the reference clock for VGA
+        * hotplug / manual detection.
+        */
+       I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
+                  DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
+       udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
+       vlv_set_power_well(dev_priv, power_well, true);
+       /*
+        * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
+        *  6.  De-assert cmn_reset/side_reset. Same as VLV X0.
+        *   a. GUnit 0x2110 bit[0] set to 1 (def 0)
+        *   b. The other bits such as sfr settings / modesel may all
+        *      be set to 0.
+        *
+        * This should only be done on init and resume from S3 with
+        * both PLLs disabled, or we risk losing DPIO and PLL
+        * synchronization.
+        */
+       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+ }
+ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
+                                           struct i915_power_well *power_well)
+ {
+       struct drm_device *dev = dev_priv->dev;
+       enum pipe pipe;
+       WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
+       for_each_pipe(pipe)
+               assert_pll_disabled(dev_priv, pipe);
+       /* Assert common reset */
+       I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+       vlv_set_power_well(dev_priv, power_well, false);
+ }
  static void check_power_well_state(struct drm_i915_private *dev_priv,
                                   struct i915_power_well *power_well)
  {
@@@ -6079,6 -6406,7 +6404,7 @@@ EXPORT_SYMBOL_GPL(i915_get_cdclk_freq)
        BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |          \
        BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |          \
        BIT(POWER_DOMAIN_PORT_CRT) |                    \
+       BIT(POWER_DOMAIN_PLLS) |                        \
        BIT(POWER_DOMAIN_INIT))
  #define HSW_DISPLAY_POWER_DOMAINS (                           \
        (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |    \
@@@ -6178,6 -6506,13 +6504,13 @@@ static const struct i915_power_well_op
        .is_enabled = vlv_power_well_enabled,
  };
  
+ static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
+       .sync_hw = vlv_power_well_sync_hw,
+       .enable = vlv_dpio_cmn_power_well_enable,
+       .disable = vlv_dpio_cmn_power_well_disable,
+       .is_enabled = vlv_power_well_enabled,
+ };
  static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
        .sync_hw = vlv_power_well_sync_hw,
        .enable = vlv_power_well_enable,
@@@ -6238,10 -6573,25 +6571,25 @@@ static struct i915_power_well vlv_power
                .name = "dpio-common",
                .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
                .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
-               .ops = &vlv_dpio_power_well_ops,
+               .ops = &vlv_dpio_cmn_power_well_ops,
        },
  };
  
+ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
+                                                enum punit_power_well power_well_id)
+ {
+       struct i915_power_domains *power_domains = &dev_priv->power_domains;
+       struct i915_power_well *power_well;
+       int i;
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+               if (power_well->data == power_well_id)
+                       return power_well;
+       }
+       return NULL;
+ }
  #define set_power_wells(power_domains, __power_wells) ({              \
        (power_domains)->power_wells = (__power_wells);                 \
        (power_domains)->power_well_count = ARRAY_SIZE(__power_wells);  \
@@@ -6292,11 -6642,50 +6640,50 @@@ static void intel_power_domains_resume(
        mutex_unlock(&power_domains->lock);
  }
  
+ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
+ {
+       struct i915_power_well *cmn =
+               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+       struct i915_power_well *disp2d =
+               lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
+       /* nothing to do if common lane is already off */
+       if (!cmn->ops->is_enabled(dev_priv, cmn))
+               return;
+       /* If the display might be already active skip this */
+       if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
+           I915_READ(DPIO_CTL) & DPIO_CMNRST)
+               return;
+       DRM_DEBUG_KMS("toggling display PHY side reset\n");
+       /* cmnlane needs DPLL registers */
+       disp2d->ops->enable(dev_priv, disp2d);
+       /*
+        * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
+        * Need to assert and de-assert PHY SB reset by gating the
+        * common lane power, then un-gating it.
+        * Simply ungating isn't enough to reset the PHY enough to get
+        * ports and lanes running.
+        */
+       cmn->ops->disable(dev_priv, cmn);
+ }
  void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
  {
+       struct drm_device *dev = dev_priv->dev;
        struct i915_power_domains *power_domains = &dev_priv->power_domains;
  
        power_domains->initializing = true;
+       if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+               mutex_lock(&power_domains->lock);
+               vlv_cmnlane_wa(dev_priv);
+               mutex_unlock(&power_domains->lock);
+       }
        /* For now, we need the power well to be always enabled. */
        intel_display_set_init_power(dev_priv, true);
        intel_power_domains_resume(dev_priv);
@@@ -6469,7 -6858,7 +6856,7 @@@ void intel_init_pm(struct drm_device *d
                                 (dev_priv->is_ddr3 == 1) ? "3" : "2",
                                 dev_priv->fsb_freq, dev_priv->mem_freq);
                        /* Disable CxSR and never update its watermark again */
-                       pineview_disable_cxsr(dev);
+                       intel_set_memory_cxsr(dev_priv, false);
                        dev_priv->display.update_wm = NULL;
                } else
                        dev_priv->display.update_wm = pineview_update_wm;
@@@ -6552,7 -6941,7 +6939,7 @@@ int sandybridge_pcode_write(struct drm_
        return 0;
  }
  
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
  {
        int div;
  
        return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
  }
  
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
  {
        int mul;
  
        return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
  }
  
+ static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+ {
+       int div, freq;
+       switch (dev_priv->rps.cz_freq) {
+       case 200:
+               div = 5;
+               break;
+       case 267:
+               div = 6;
+               break;
+       case 320:
+       case 333:
+       case 400:
+               div = 8;
+               break;
+       default:
+               return -1;
+       }
+       freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
+       return freq;
+ }
+ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+ {
+       int mul, opcode;
+       switch (dev_priv->rps.cz_freq) {
+       case 200:
+               mul = 5;
+               break;
+       case 267:
+               mul = 6;
+               break;
+       case 320:
+       case 333:
+       case 400:
+               mul = 8;
+               break;
+       default:
+               return -1;
+       }
+       opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2);
+       return opcode;
+ }
+ int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
+ {
+       int ret = -1;
+       if (IS_CHERRYVIEW(dev_priv->dev))
+               ret = chv_gpu_freq(dev_priv, val);
+       else if (IS_VALLEYVIEW(dev_priv->dev))
+               ret = byt_gpu_freq(dev_priv, val);
+       return ret;
+ }
+ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
+ {
+       int ret = -1;
+       if (IS_CHERRYVIEW(dev_priv->dev))
+               ret = chv_freq_opcode(dev_priv, val);
+       else if (IS_VALLEYVIEW(dev_priv->dev))
+               ret = byt_freq_opcode(dev_priv, val);
+       return ret;
+ }
  void intel_pm_setup(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
                          intel_gen6_powersave_work);
  
        dev_priv->pm.suspended = false;
-       dev_priv->pm.irqs_disabled = false;
+       dev_priv->pm._irqs_disabled = false;
  }
index 959f0866d9935f8a2a9c141684fb4ef35e79e946,a773830c6c40cdf8570d2d90814379eeeb6aaf14..092d067f93e16534aca712779ce2aa5705069395
   *   2.38.0 - RADEON_GEM_OP (GET_INITIAL_DOMAIN, SET_INITIAL_DOMAIN),
   *            CIK: 1D and linear tiling modes contain valid PIPE_CONFIG
   *   2.39.0 - Add INFO query for number of active CUs
+  *   2.40.0 - Add RADEON_GEM_GTT_WC/UC, flush HDP cache before submitting
+  *            CS to GPU
   */
  #define KMS_DRIVER_MAJOR      2
- #define KMS_DRIVER_MINOR      39
+ #define KMS_DRIVER_MINOR      40
  #define KMS_DRIVER_PATCHLEVEL 0
  int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
  int radeon_driver_unload_kms(struct drm_device *dev);
@@@ -132,7 -134,6 +134,7 @@@ struct drm_gem_object *radeon_gem_prime
                                                        struct sg_table *sg);
  int radeon_gem_prime_pin(struct drm_gem_object *obj);
  void radeon_gem_prime_unpin(struct drm_gem_object *obj);
 +struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *);
  void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
  void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
  extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd,
@@@ -174,9 -175,10 +176,10 @@@ int radeon_dpm = -1
  int radeon_aspm = -1;
  int radeon_runtime_pm = -1;
  int radeon_hard_reset = 0;
- int radeon_vm_size = 4;
- int radeon_vm_block_size = 9;
+ int radeon_vm_size = 8;
+ int radeon_vm_block_size = -1;
  int radeon_deep_color = 0;
+ int radeon_use_pflipirq = 2;
  
  MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
  module_param_named(no_wb, radeon_no_wb, int, 0444);
@@@ -247,12 -249,15 +250,15 @@@ module_param_named(hard_reset, radeon_h
  MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
  module_param_named(vm_size, radeon_vm_size, int, 0444);
  
- MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
+ MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
  module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
  
  MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
  module_param_named(deep_color, radeon_deep_color, int, 0444);
  
+ MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");
+ module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444);
  static struct pci_device_id pciidlist[] = {
        radeon_PCI_IDS
  };
@@@ -567,7 -572,6 +573,7 @@@ static struct drm_driver kms_driver = 
        .gem_prime_import = drm_gem_prime_import,
        .gem_prime_pin = radeon_gem_prime_pin,
        .gem_prime_unpin = radeon_gem_prime_unpin,
 +      .gem_prime_res_obj = radeon_gem_prime_res_obj,
        .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
        .gem_prime_vmap = radeon_gem_prime_vmap,
index 28d71070c389015e2dc003d27b2302c19d7eecca,f7e48d329db38c0ad802c2fe5cfacf5235195e15..0b16f2cbcf170b5a9a6d23b6683eedb827b890b4
@@@ -65,7 -65,7 +65,7 @@@ struct drm_gem_object *radeon_gem_prime
        int ret;
  
        ret = radeon_bo_create(rdev, size, PAGE_SIZE, false,
-                              RADEON_GEM_DOMAIN_GTT, sg, &bo);
+                              RADEON_GEM_DOMAIN_GTT, 0, sg, &bo);
        if (ret)
                return ERR_PTR(ret);
  
@@@ -103,11 -103,3 +103,11 @@@ void radeon_gem_prime_unpin(struct drm_
        radeon_bo_unpin(bo);
        radeon_bo_unreserve(bo);
  }
 +
 +
 +struct reservation_object *radeon_gem_prime_res_obj(struct drm_gem_object *obj)
 +{
 +      struct radeon_bo *bo = gem_to_radeon_bo(obj);
 +
 +      return bo->tbo.resv;
 +}
index 78cc8143760ab3977e73b9e3d99d410ce0e57e20,2545c7a468a2f3c316729cfa731bf238f6eac73f..ce023fa3e8ae14bdfc3557295d69a7d8d12f9cd3
@@@ -16,6 -16,7 +16,7 @@@
  #include <linux/dma-buf.h>
  #include <drm/tegra_drm.h>
  
+ #include "drm.h"
  #include "gem.h"
  
  static inline struct tegra_bo *host1x_to_tegra_bo(struct host1x_bo *bo)
@@@ -126,7 -127,7 +127,7 @@@ struct tegra_bo *tegra_bo_create(struc
                goto err_mmap;
  
        if (flags & DRM_TEGRA_GEM_CREATE_TILED)
-               bo->flags |= TEGRA_BO_TILED;
+               bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
  
        if (flags & DRM_TEGRA_GEM_CREATE_BOTTOM_UP)
                bo->flags |= TEGRA_BO_BOTTOM_UP;
@@@ -259,8 -260,10 +260,10 @@@ int tegra_bo_dumb_create(struct drm_fil
                         struct drm_mode_create_dumb *args)
  {
        int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+       struct tegra_drm *tegra = drm->dev_private;
        struct tegra_bo *bo;
  
+       min_pitch = round_up(min_pitch, tegra->pitch_align);
        if (args->pitch < min_pitch)
                args->pitch = min_pitch;
  
@@@ -420,7 -423,7 +423,7 @@@ struct dma_buf *tegra_gem_prime_export(
                                       int flags)
  {
        return dma_buf_export(gem, &tegra_gem_prime_dmabuf_ops, gem->size,
 -                            flags);
 +                            flags, NULL);
  }
  
  struct drm_gem_object *tegra_gem_prime_import(struct drm_device *drm,
index c886c024c637e88053a9ccd32c660ba0984fb075,c1811750cc8d0e8f1e29bec8b7c11a1db52ec9ae..99f731757c4bf6748ec4584103a2d5527f4618eb
  #include <drm/ttm/ttm_module.h>
  #include "vmwgfx_fence.h"
  
- #define VMWGFX_DRIVER_DATE "20140325"
+ #define VMWGFX_DRIVER_DATE "20140704"
  #define VMWGFX_DRIVER_MAJOR 2
  #define VMWGFX_DRIVER_MINOR 6
- #define VMWGFX_DRIVER_PATCHLEVEL 0
+ #define VMWGFX_DRIVER_PATCHLEVEL 1
  #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
  #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
  #define VMWGFX_MAX_RELOCATIONS 2048
  #define VMW_RES_FENCE ttm_driver_type3
  #define VMW_RES_SHADER ttm_driver_type4
  
- struct vmw_compat_shader_manager;
  struct vmw_fpriv {
        struct drm_master *locked_master;
        struct ttm_object_file *tfile;
        struct list_head fence_events;
        bool gb_aware;
-       struct vmw_compat_shader_manager *shman;
  };
  
  struct vmw_dma_buffer {
@@@ -124,6 -121,10 +121,10 @@@ struct vmw_resource 
        void (*hw_destroy) (struct vmw_resource *res);
  };
  
+ /*
+  * Resources that are managed using ioctls.
+  */
  enum vmw_res_type {
        vmw_res_context,
        vmw_res_surface,
        vmw_res_max
  };
  
+ /*
+  * Resources that are managed using command streams.
+  */
+ enum vmw_cmdbuf_res_type {
+       vmw_cmdbuf_res_compat_shader
+ };
+ struct vmw_cmdbuf_res_manager;
  struct vmw_cursor_snooper {
        struct drm_crtc *crtc;
        size_t age;
@@@ -159,8 -169,8 +169,8 @@@ struct vmw_surface 
  
  struct vmw_marker_queue {
        struct list_head head;
 -      struct timespec lag;
 -      struct timespec lag_time;
 +      u64 lag;
 +      u64 lag_time;
        spinlock_t lock;
  };
  
@@@ -341,7 -351,7 +351,7 @@@ struct vmw_sw_context
        bool needs_post_query_barrier;
        struct vmw_resource *error_resource;
        struct vmw_ctx_binding_state staged_bindings;
-       struct list_head staged_shaders;
+       struct list_head staged_cmd_res;
  };
  
  struct vmw_legacy_display;
@@@ -974,7 -984,8 +984,8 @@@ extern void vmw_context_binding_res_lis
  extern void vmw_context_binding_res_list_scrub(struct list_head *head);
  extern int vmw_context_rebind_all(struct vmw_resource *ctx);
  extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
+ extern struct vmw_cmdbuf_res_manager *
+ vmw_context_res_man(struct vmw_resource *ctx);
  /*
   * Surface management - vmwgfx_surface.c
   */
@@@ -1008,27 -1019,42 +1019,42 @@@ extern int vmw_shader_define_ioctl(stru
                                   struct drm_file *file_priv);
  extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv);
- extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
-                                   SVGA3dShaderType shader_type,
-                                   u32 *user_key);
- extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
-                                     struct list_head *list);
- extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
-                                     struct list_head *list);
- extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
-                                   u32 user_key,
-                                   SVGA3dShaderType shader_type,
-                                   struct list_head *list);
- extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
+                                struct vmw_cmdbuf_res_manager *man,
                                 u32 user_key, const void *bytecode,
                                 SVGA3dShaderType shader_type,
                                 size_t size,
-                                struct ttm_object_file *tfile,
                                 struct list_head *list);
- extern struct vmw_compat_shader_manager *
- vmw_compat_shader_man_create(struct vmw_private *dev_priv);
- extern void
- vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+ extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
+                                   u32 user_key, SVGA3dShaderType shader_type,
+                                   struct list_head *list);
+ extern struct vmw_resource *
+ vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
+                        u32 user_key, SVGA3dShaderType shader_type);
+ /*
+  * Command buffer managed resources - vmwgfx_cmdbuf_res.c
+  */
+ extern struct vmw_cmdbuf_res_manager *
+ vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
+ extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
+ extern size_t vmw_cmdbuf_res_man_size(void);
+ extern struct vmw_resource *
+ vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
+                     enum vmw_cmdbuf_res_type res_type,
+                     u32 user_key);
+ extern void vmw_cmdbuf_res_revert(struct list_head *list);
+ extern void vmw_cmdbuf_res_commit(struct list_head *list);
+ extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
+                             enum vmw_cmdbuf_res_type res_type,
+                             u32 user_key,
+                             struct vmw_resource *res,
+                             struct list_head *list);
+ extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
+                                enum vmw_cmdbuf_res_type res_type,
+                                u32 user_key,
+                                struct list_head *list);
  
  
  /**
index 47ee6c79857a4b59009025df6b4750b890349f0f,6f54ff4f937277e37af48ba63796b4d9c605609f..6b22106534d8d62451e18f5fe453bd3c2bc2078f
@@@ -202,7 -202,7 +202,7 @@@ static const struct file_operations imx
  
  void imx_drm_connector_destroy(struct drm_connector *connector)
  {
-       drm_sysfs_connector_remove(connector);
+       drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
  }
  EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
@@@ -293,10 -293,10 +293,10 @@@ static int imx_drm_driver_load(struct d
         * userspace will expect to be able to access DRM at this point.
         */
        list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-               ret = drm_sysfs_connector_add(connector);
+               ret = drm_connector_register(connector);
                if (ret) {
                        dev_err(drm->dev,
-                               "[CONNECTOR:%d:%s] drm_sysfs_connector_add failed: %d\n",
+                               "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
                                connector->base.id,
                                connector->name, ret);
                        goto err_unbind;
@@@ -570,6 -570,22 +570,6 @@@ static int compare_of(struct device *de
        return dev->of_node == np;
  }
  
 -static LIST_HEAD(imx_drm_components);
 -
 -static int imx_drm_add_components(struct device *master, struct master *m)
 -{
 -      struct imx_drm_component *component;
 -      int ret;
 -
 -      list_for_each_entry(component, &imx_drm_components, list) {
 -              ret = component_master_add_child(m, compare_of,
 -                                               component->of_node);
 -              if (ret)
 -                      return ret;
 -      }
 -      return 0;
 -}
 -
  static int imx_drm_bind(struct device *dev)
  {
        return drm_platform_init(&imx_drm_driver, to_platform_device(dev));
@@@ -581,14 -597,43 +581,14 @@@ static void imx_drm_unbind(struct devic
  }
  
  static const struct component_master_ops imx_drm_ops = {
 -      .add_components = imx_drm_add_components,
        .bind = imx_drm_bind,
        .unbind = imx_drm_unbind,
  };
  
 -static struct imx_drm_component *imx_drm_find_component(struct device *dev,
 -              struct device_node *node)
 -{
 -      struct imx_drm_component *component;
 -
 -      list_for_each_entry(component, &imx_drm_components, list)
 -              if (component->of_node == node)
 -                      return component;
 -
 -      return NULL;
 -}
 -
 -static int imx_drm_add_component(struct device *dev, struct device_node *node)
 -{
 -      struct imx_drm_component *component;
 -
 -      if (imx_drm_find_component(dev, node))
 -              return 0;
 -
 -      component = devm_kzalloc(dev, sizeof(*component), GFP_KERNEL);
 -      if (!component)
 -              return -ENOMEM;
 -
 -      component->of_node = node;
 -      list_add_tail(&component->list, &imx_drm_components);
 -
 -      return 0;
 -}
 -
  static int imx_drm_platform_probe(struct platform_device *pdev)
  {
        struct device_node *ep, *port, *remote;
 +      struct component_match *match = NULL;
        int ret;
        int i;
  
                if (!port)
                        break;
  
 -              ret = imx_drm_add_component(&pdev->dev, port);
 -              if (ret < 0)
 -                      return ret;
 +              component_match_add(&pdev->dev, &match, compare_of, port);
        }
  
        if (i == 0) {
                                continue;
                        }
  
 -                      ret = imx_drm_add_component(&pdev->dev, remote);
 +                      component_match_add(&pdev->dev, &match, compare_of, remote);
                        of_node_put(remote);
 -                      if (ret < 0)
 -                              return ret;
                }
                of_node_put(port);
        }
        if (ret)
                return ret;
  
 -      return component_master_add(&pdev->dev, &imx_drm_ops);
 +      return component_master_add_with_match(&pdev->dev, &imx_drm_ops, match);
  }
  
  static int imx_drm_platform_remove(struct platform_device *pdev)
diff --combined include/drm/drmP.h
index e41f17ea1f139c1616a27dfe02e32f72cf71bde0,a12fbbac373c6e6c848812d9299cdcfd10c319eb..196890735367daf53a796099ca21b153d55b560c
@@@ -83,7 -83,6 +83,7 @@@ struct drm_device
  
  struct device_node;
  struct videomode;
 +struct reservation_object;
  
  #include <drm/drm_os_linux.h>
  #include <drm/drm_hashtab.h>
@@@ -152,8 -151,6 +152,6 @@@ int drm_err(const char *func, const cha
                                     also include looping detection. */
  
  #define DRM_MAGIC_HASH_ORDER  4  /**< Size of key hash table. Must be power of 2. */
- #define DRM_KERNEL_CONTEXT    0        /**< Change drm_resctx if changed */
- #define DRM_RESERVED_CONTEXTS 1        /**< Change drm_resctx if changed */
  
  #define DRM_MAP_HASH_OFFSET 0x10000000
  
@@@ -347,18 -344,6 +345,6 @@@ struct drm_waitlist 
        spinlock_t write_lock;
  };
  
- struct drm_freelist {
-       int initialized;               /**< Freelist in use */
-       atomic_t count;                /**< Number of free buffers */
-       struct drm_buf *next;          /**< End pointer */
-       wait_queue_head_t waiting;     /**< Processes waiting on free bufs */
-       int low_mark;                  /**< Low water mark */
-       int high_mark;                 /**< High water mark */
-       atomic_t wfh;                  /**< If waiting for high mark */
-       spinlock_t lock;
- };
  typedef struct drm_dma_handle {
        dma_addr_t busaddr;
        void *vaddr;
@@@ -376,7 -361,8 +362,8 @@@ struct drm_buf_entry 
        int page_order;
        struct drm_dma_handle **seglist;
  
-       struct drm_freelist freelist;
+       int low_mark;                   /**< Low water mark */
+       int high_mark;                  /**< High water mark */
  };
  
  /* Event queued up for userspace to read */
@@@ -397,7 -383,6 +384,6 @@@ struct drm_prime_file_private 
  
  /** File private data */
  struct drm_file {
-       unsigned always_authenticated :1;
        unsigned authenticated :1;
        /* Whether we're master for a minor. Protected by master_mutex */
        unsigned is_master :1;
        struct drm_prime_file_private prime;
  };
  
- /** Wait queue */
- struct drm_queue {
-       atomic_t use_count;             /**< Outstanding uses (+1) */
-       atomic_t finalization;          /**< Finalization in progress */
-       atomic_t block_count;           /**< Count of processes waiting */
-       atomic_t block_read;            /**< Queue blocked for reads */
-       wait_queue_head_t read_queue;   /**< Processes waiting on block_read */
-       atomic_t block_write;           /**< Queue blocked for writes */
-       wait_queue_head_t write_queue;  /**< Processes waiting on block_write */
-       atomic_t total_queued;          /**< Total queued statistic */
-       atomic_t total_flushed;         /**< Total flushes statistic */
-       atomic_t total_locks;           /**< Total locks statistics */
-       enum drm_ctx_flags flags;       /**< Context preserving and 2D-only */
-       struct drm_waitlist waitlist;   /**< Pending buffers */
-       wait_queue_head_t flush_queue;  /**< Processes waiting until flush */
- };
  /**
   * Lock data.
   */
@@@ -567,15 -535,6 +536,6 @@@ struct drm_map_list 
        struct drm_master *master;
  };
  
- /**
-  * Context handle list
-  */
- struct drm_ctx_list {
-       struct list_head head;          /**< list head */
-       drm_context_t handle;           /**< context handle */
-       struct drm_file *tag;           /**< associated fd private data */
- };
  /* location of GART table */
  #define DRM_ATI_GART_MAIN 1
  #define DRM_ATI_GART_FB   2
@@@ -924,8 -883,6 +884,8 @@@ struct drm_driver 
        /* low-level interface used by drm_gem_prime_{import,export} */
        int (*gem_prime_pin)(struct drm_gem_object *obj);
        void (*gem_prime_unpin)(struct drm_gem_object *obj);
 +      struct reservation_object * (*gem_prime_res_obj)(
 +                              struct drm_gem_object *obj);
        struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
        struct drm_gem_object *(*gem_prime_import_sg_table)(
                                struct drm_device *dev, size_t size,
@@@ -1218,7 -1175,6 +1178,6 @@@ extern bool drm_ioctl_flags(unsigned in
                                /* Device support (drm_fops.h) */
  extern struct mutex drm_global_mutex;
  extern int drm_open(struct inode *inode, struct file *filp);
- extern int drm_stub_open(struct inode *inode, struct file *filp);
  extern ssize_t drm_read(struct file *filp, char __user *buffer,
                        size_t count, loff_t *offset);
  extern int drm_release(struct inode *inode, struct file *filp);
@@@ -1256,29 -1212,6 +1215,6 @@@ extern int drm_setversion(struct drm_de
  extern int drm_noop(struct drm_device *dev, void *data,
                    struct drm_file *file_priv);
  
-                               /* Context IOCTL support (drm_context.h) */
- extern int drm_resctx(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv);
- extern int drm_addctx(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv);
- extern int drm_getctx(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv);
- extern int drm_switchctx(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv);
- extern int drm_newctx(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv);
- extern int drm_rmctx(struct drm_device *dev, void *data,
-                    struct drm_file *file_priv);
- extern int drm_ctxbitmap_init(struct drm_device *dev);
- extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
- extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
- extern int drm_setsareactx(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv);
- extern int drm_getsareactx(struct drm_device *dev, void *data,
-                          struct drm_file *file_priv);
                                /* Authentication IOCTL support (drm_auth.h) */
  extern int drm_getmagic(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
@@@ -1398,17 -1331,12 +1334,12 @@@ extern void drm_master_put(struct drm_m
  extern void drm_put_dev(struct drm_device *dev);
  extern void drm_unplug_dev(struct drm_device *dev);
  extern unsigned int drm_debug;
- extern unsigned int drm_rnodes;
- extern unsigned int drm_universal_planes;
  
  extern unsigned int drm_vblank_offdelay;
  extern unsigned int drm_timestamp_precision;
  extern unsigned int drm_timestamp_monotonic;
  
  extern struct class *drm_class;
- extern struct dentry *drm_debugfs_root;
- extern struct idr drm_minors_idr;
  
  extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
  
@@@ -1422,6 -1350,8 +1353,8 @@@ extern int drm_debugfs_create_files(con
  extern int drm_debugfs_remove_files(const struct drm_info_list *files,
                                    int count, struct drm_minor *minor);
  extern int drm_debugfs_cleanup(struct drm_minor *minor);
+ extern int drm_debugfs_connector_add(struct drm_connector *connector);
+ extern void drm_debugfs_connector_remove(struct drm_connector *connector);
  #else
  static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
                                   struct dentry *root)
@@@ -1446,6 -1376,15 +1379,15 @@@ static inline int drm_debugfs_cleanup(s
  {
        return 0;
  }
+ static inline int drm_debugfs_connector_add(struct drm_connector *connector)
+ {
+       return 0;
+ }
+ static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
+ {
+ }
  #endif
  
                                /* Info file support */
@@@ -1515,9 -1454,8 +1457,8 @@@ extern int drm_pci_set_unique(struct dr
  struct drm_sysfs_class;
  extern struct class *drm_sysfs_create(struct module *owner, char *name);
  extern void drm_sysfs_destroy(void);
- extern int drm_sysfs_device_add(struct drm_minor *minor);
+ extern struct device *drm_sysfs_minor_alloc(struct drm_minor *minor);
  extern void drm_sysfs_hotplug_event(struct drm_device *dev);
- extern void drm_sysfs_device_remove(struct drm_minor *minor);
  extern int drm_sysfs_connector_add(struct drm_connector *connector);
  extern void drm_sysfs_connector_remove(struct drm_connector *connector);
  
@@@ -1577,7 -1515,7 +1518,7 @@@ void drm_gem_free_mmap_offset(struct dr
  int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
  int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
  
- struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+ struct page **drm_gem_get_pages(struct drm_gem_object *obj);
  void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
                bool dirty, bool accessed);