Re: [PATCH RFCv2 3/4] staging: etnaviv: add drm driver

On Fri, Sep 11, 2015 at 10:10 AM, Lucas Stach <l.stach@xxxxxxxxxxxxxx> wrote:
> From: Christian Gmeiner <christian.gmeiner@xxxxxxxxx>
>
> This is a squashed commit of the complete etnaviv DRM driver. It is
> meant to make the code easy to review by presenting the driver as a
> whole; it is not intended for merging in this form.
>
> If you are interested in the history of individual commits:
> git://git.pengutronix.de/git/lst/linux.git etnaviv-for-upstream
>
> Signed-off-by: Christian Gmeiner <christian.gmeiner@xxxxxxxxx>
> Signed-off-by: Russell King <rmk+kernel@xxxxxxxxxxxxxxxx>
> Signed-off-by: Lucas Stach <l.stach@xxxxxxxxxxxxxx>
> ---
>  drivers/staging/Kconfig                      |    2 +
>  drivers/staging/Makefile                     |    1 +
>  drivers/staging/etnaviv/Kconfig              |   20 +
>  drivers/staging/etnaviv/Makefile             |   18 +
>  drivers/staging/etnaviv/cmdstream.xml.h      |  218 ++++
>  drivers/staging/etnaviv/common.xml.h         |  249 +++++
>  drivers/staging/etnaviv/etnaviv_buffer.c     |  271 +++++
>  drivers/staging/etnaviv/etnaviv_cmd_parser.c |  119 +++
>  drivers/staging/etnaviv/etnaviv_drv.c        |  705 +++++++++++++
>  drivers/staging/etnaviv/etnaviv_drv.h        |  138 +++
>  drivers/staging/etnaviv/etnaviv_gem.c        |  887 ++++++++++++++++
>  drivers/staging/etnaviv/etnaviv_gem.h        |  141 +++
>  drivers/staging/etnaviv/etnaviv_gem_prime.c  |  121 +++
>  drivers/staging/etnaviv/etnaviv_gem_submit.c |  421 ++++++++
>  drivers/staging/etnaviv/etnaviv_gpu.c        | 1468 ++++++++++++++++++++++++++
>  drivers/staging/etnaviv/etnaviv_gpu.h        |  198 ++++
>  drivers/staging/etnaviv/etnaviv_iommu.c      |  221 ++++
>  drivers/staging/etnaviv/etnaviv_iommu.h      |   28 +
>  drivers/staging/etnaviv/etnaviv_iommu_v2.c   |   33 +
>  drivers/staging/etnaviv/etnaviv_iommu_v2.h   |   25 +
>  drivers/staging/etnaviv/etnaviv_mmu.c        |  282 +++++
>  drivers/staging/etnaviv/etnaviv_mmu.h        |   58 +
>  drivers/staging/etnaviv/state.xml.h          |  351 ++++++
>  drivers/staging/etnaviv/state_hi.xml.h       |  407 +++++++
>  include/uapi/drm/etnaviv_drm.h               |  215 ++++
>  25 files changed, 6597 insertions(+)
>  create mode 100644 drivers/staging/etnaviv/Kconfig
>  create mode 100644 drivers/staging/etnaviv/Makefile
>  create mode 100644 drivers/staging/etnaviv/cmdstream.xml.h
>  create mode 100644 drivers/staging/etnaviv/common.xml.h
>  create mode 100644 drivers/staging/etnaviv/etnaviv_buffer.c
>  create mode 100644 drivers/staging/etnaviv/etnaviv_cmd_parser.c
>  create mode 100644 drivers/staging/etnaviv/etnaviv_drv.c
>  create mode 100644 drivers/staging/etnaviv/etnaviv_drv.h
>  create mode 100644 drivers/staging/etnaviv/etnaviv_gem.c
>  create mode 100644 drivers/staging/etnaviv/etnaviv_gem.h
>  create mode 100644 drivers/staging/etnaviv/etnaviv_gem_prime.c
>  create mode 100644 drivers/staging/etnaviv/etnaviv_gem_submit.c
>  create mode 100644 drivers/staging/etnaviv/etnaviv_gpu.c
>  create mode 100644 drivers/staging/etnaviv/etnaviv_gpu.h
>  create mode 100644 drivers/staging/etnaviv/etnaviv_iommu.c
>  create mode 100644 drivers/staging/etnaviv/etnaviv_iommu.h
>  create mode 100644 drivers/staging/etnaviv/etnaviv_iommu_v2.c
>  create mode 100644 drivers/staging/etnaviv/etnaviv_iommu_v2.h
>  create mode 100644 drivers/staging/etnaviv/etnaviv_mmu.c
>  create mode 100644 drivers/staging/etnaviv/etnaviv_mmu.h
>  create mode 100644 drivers/staging/etnaviv/state.xml.h
>  create mode 100644 drivers/staging/etnaviv/state_hi.xml.h
>  create mode 100644 include/uapi/drm/etnaviv_drm.h
>
> diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
> index 7f6cae5beb90..5446fe4859ce 100644
> --- a/drivers/staging/Kconfig
> +++ b/drivers/staging/Kconfig
> @@ -112,4 +112,6 @@ source "drivers/staging/fsl-mc/Kconfig"
>
>  source "drivers/staging/wilc1000/Kconfig"
>
> +source "drivers/staging/etnaviv/Kconfig"
> +
>  endif # STAGING
> diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
> index 347f6477aa3e..9fd3c06b6bfd 100644
> --- a/drivers/staging/Makefile
> +++ b/drivers/staging/Makefile
> @@ -48,3 +48,4 @@ obj-$(CONFIG_COMMON_CLK_XLNX_CLKWZRD) += clocking-wizard/
>  obj-$(CONFIG_FB_TFT)           += fbtft/
>  obj-$(CONFIG_FSL_MC_BUS)       += fsl-mc/
>  obj-$(CONFIG_WILC1000)         += wilc1000/
> +obj-$(CONFIG_DRM_ETNAVIV)      += etnaviv/
> diff --git a/drivers/staging/etnaviv/Kconfig b/drivers/staging/etnaviv/Kconfig
> new file mode 100644
> index 000000000000..6f034eda914c
> --- /dev/null
> +++ b/drivers/staging/etnaviv/Kconfig
> @@ -0,0 +1,20 @@
> +
> +config DRM_ETNAVIV
> +       tristate "etnaviv DRM"
> +       depends on DRM
> +       select SHMEM
> +       select TMPFS
> +       select IOMMU_API
> +       select IOMMU_SUPPORT
> +       default y
> +       help
> +         DRM driver for Vivante GPUs.
> +
> +config DRM_ETNAVIV_REGISTER_LOGGING
> +       bool "etnaviv DRM register logging"
> +       depends on DRM_ETNAVIV
> +       default n
> +       help
> +         Compile in support for logging register reads/writes in a format
> +         that can be parsed by the envytools demsm tool.  If enabled,
> +         register logging can be switched on via the etnaviv.reglog=y
> +         module parameter.

heh, ok, didn't realize anyone else was using demsm..  I guess that is
one of the things that I should clean up and get into the upstream
envytools tree.  (And maybe come up with a better name.. if anyone has
any suggestions..)

BR,
-R

> diff --git a/drivers/staging/etnaviv/Makefile b/drivers/staging/etnaviv/Makefile
> new file mode 100644
> index 000000000000..2b71c31b6501
> --- /dev/null
> +++ b/drivers/staging/etnaviv/Makefile
> @@ -0,0 +1,18 @@
> +ccflags-y := -Iinclude/drm -Idrivers/staging/etnaviv
> +ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
> +       ccflags-y += -Werror
> +endif
> +
> +etnaviv-y := \
> +       etnaviv_cmd_parser.o \
> +       etnaviv_drv.o \
> +       etnaviv_gem.o \
> +       etnaviv_gem_prime.o \
> +       etnaviv_gem_submit.o \
> +       etnaviv_gpu.o \
> +       etnaviv_iommu.o \
> +       etnaviv_iommu_v2.o \
> +       etnaviv_mmu.o \
> +       etnaviv_buffer.o
> +
> +obj-$(CONFIG_DRM_ETNAVIV)      += etnaviv.o
> diff --git a/drivers/staging/etnaviv/cmdstream.xml.h b/drivers/staging/etnaviv/cmdstream.xml.h
> new file mode 100644
> index 000000000000..8c44ba9a694e
> --- /dev/null
> +++ b/drivers/staging/etnaviv/cmdstream.xml.h
> @@ -0,0 +1,218 @@
> +#ifndef CMDSTREAM_XML
> +#define CMDSTREAM_XML
> +
> +/* Autogenerated file, DO NOT EDIT manually!
> +
> +This file was generated by the rules-ng-ng headergen tool in this git repository:
> +http://0x04.net/cgit/index.cgi/rules-ng-ng
> +git clone git://0x04.net/rules-ng-ng
> +
> +The rules-ng-ng source files this header was generated from are:
> +- cmdstream.xml (  12589 bytes, from 2014-02-17 14:57:56)
> +- common.xml    (  18437 bytes, from 2015-03-25 11:27:41)
> +
> +Copyright (C) 2014
> +*/
> +
> +
> +#define FE_OPCODE_LOAD_STATE                                   0x00000001
> +#define FE_OPCODE_END                                          0x00000002
> +#define FE_OPCODE_NOP                                          0x00000003
> +#define FE_OPCODE_DRAW_2D                                      0x00000004
> +#define FE_OPCODE_DRAW_PRIMITIVES                              0x00000005
> +#define FE_OPCODE_DRAW_INDEXED_PRIMITIVES                      0x00000006
> +#define FE_OPCODE_WAIT                                         0x00000007
> +#define FE_OPCODE_LINK                                         0x00000008
> +#define FE_OPCODE_STALL                                                0x00000009
> +#define FE_OPCODE_CALL                                         0x0000000a
> +#define FE_OPCODE_RETURN                                       0x0000000b
> +#define FE_OPCODE_CHIP_SELECT                                  0x0000000d
> +#define PRIMITIVE_TYPE_POINTS                                  0x00000001
> +#define PRIMITIVE_TYPE_LINES                                   0x00000002
> +#define PRIMITIVE_TYPE_LINE_STRIP                              0x00000003
> +#define PRIMITIVE_TYPE_TRIANGLES                               0x00000004
> +#define PRIMITIVE_TYPE_TRIANGLE_STRIP                          0x00000005
> +#define PRIMITIVE_TYPE_TRIANGLE_FAN                            0x00000006
> +#define PRIMITIVE_TYPE_LINE_LOOP                               0x00000007
> +#define PRIMITIVE_TYPE_QUADS                                   0x00000008
> +#define VIV_FE_LOAD_STATE                                      0x00000000
> +
> +#define VIV_FE_LOAD_STATE_HEADER                               0x00000000
> +#define VIV_FE_LOAD_STATE_HEADER_OP__MASK                      0xf8000000
> +#define VIV_FE_LOAD_STATE_HEADER_OP__SHIFT                     27
> +#define VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE                 0x08000000
> +#define VIV_FE_LOAD_STATE_HEADER_FIXP                          0x04000000
> +#define VIV_FE_LOAD_STATE_HEADER_COUNT__MASK                   0x03ff0000
> +#define VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT                  16
> +#define VIV_FE_LOAD_STATE_HEADER_COUNT(x)                      (((x) << VIV_FE_LOAD_STATE_HEADER_COUNT__SHIFT) & VIV_FE_LOAD_STATE_HEADER_COUNT__MASK)
> +#define VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK                  0x0000ffff
> +#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT                 0
> +#define VIV_FE_LOAD_STATE_HEADER_OFFSET(x)                     (((x) << VIV_FE_LOAD_STATE_HEADER_OFFSET__SHIFT) & VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK)
> +#define VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR                   2
> +
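
Since the generated names are terse, a worked example of the encoding
above may help: a LOAD_STATE packet is one header word followed by
COUNT payload words, and the OFFSET field is counted in 32-bit
registers (hence the __SHR of 2).  A minimal sketch, assuming 0x380c
is a valid state address as used elsewhere in this series:

	/* two-word packet: load one register at state address 0x380c */
	u32 header = VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		     VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		     VIV_FE_LOAD_STATE_HEADER_OFFSET(
			0x380c >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR);
	/* the register value follows as the second word */
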
> +#define VIV_FE_END                                             0x00000000
> +
> +#define VIV_FE_END_HEADER                                      0x00000000
> +#define VIV_FE_END_HEADER_EVENT_ID__MASK                       0x0000001f
> +#define VIV_FE_END_HEADER_EVENT_ID__SHIFT                      0
> +#define VIV_FE_END_HEADER_EVENT_ID(x)                          (((x) << VIV_FE_END_HEADER_EVENT_ID__SHIFT) & VIV_FE_END_HEADER_EVENT_ID__MASK)
> +#define VIV_FE_END_HEADER_EVENT_ENABLE                         0x00000100
> +#define VIV_FE_END_HEADER_OP__MASK                             0xf8000000
> +#define VIV_FE_END_HEADER_OP__SHIFT                            27
> +#define VIV_FE_END_HEADER_OP_END                               0x10000000
> +
> +#define VIV_FE_NOP                                             0x00000000
> +
> +#define VIV_FE_NOP_HEADER                                      0x00000000
> +#define VIV_FE_NOP_HEADER_OP__MASK                             0xf8000000
> +#define VIV_FE_NOP_HEADER_OP__SHIFT                            27
> +#define VIV_FE_NOP_HEADER_OP_NOP                               0x18000000
> +
> +#define VIV_FE_DRAW_2D                                         0x00000000
> +
> +#define VIV_FE_DRAW_2D_HEADER                                  0x00000000
> +#define VIV_FE_DRAW_2D_HEADER_COUNT__MASK                      0x0000ff00
> +#define VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT                     8
> +#define VIV_FE_DRAW_2D_HEADER_COUNT(x)                         (((x) << VIV_FE_DRAW_2D_HEADER_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_COUNT__MASK)
> +#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK                 0x07ff0000
> +#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT                        16
> +#define VIV_FE_DRAW_2D_HEADER_DATA_COUNT(x)                    (((x) << VIV_FE_DRAW_2D_HEADER_DATA_COUNT__SHIFT) & VIV_FE_DRAW_2D_HEADER_DATA_COUNT__MASK)
> +#define VIV_FE_DRAW_2D_HEADER_OP__MASK                         0xf8000000
> +#define VIV_FE_DRAW_2D_HEADER_OP__SHIFT                                27
> +#define VIV_FE_DRAW_2D_HEADER_OP_DRAW_2D                       0x20000000
> +
> +#define VIV_FE_DRAW_2D_TOP_LEFT                                        0x00000008
> +#define VIV_FE_DRAW_2D_TOP_LEFT_X__MASK                                0x0000ffff
> +#define VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT                       0
> +#define VIV_FE_DRAW_2D_TOP_LEFT_X(x)                           (((x) << VIV_FE_DRAW_2D_TOP_LEFT_X__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_X__MASK)
> +#define VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK                                0xffff0000
> +#define VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT                       16
> +#define VIV_FE_DRAW_2D_TOP_LEFT_Y(x)                           (((x) << VIV_FE_DRAW_2D_TOP_LEFT_Y__SHIFT) & VIV_FE_DRAW_2D_TOP_LEFT_Y__MASK)
> +
> +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT                            0x0000000c
> +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK                    0x0000ffff
> +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT                   0
> +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_X(x)                       (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_X__MASK)
> +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK                    0xffff0000
> +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT                   16
> +#define VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y(x)                       (((x) << VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__SHIFT) & VIV_FE_DRAW_2D_BOTTOM_RIGHT_Y__MASK)
> +
> +#define VIV_FE_DRAW_PRIMITIVES                                 0x00000000
> +
> +#define VIV_FE_DRAW_PRIMITIVES_HEADER                          0x00000000
> +#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__MASK                 0xf8000000
> +#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP__SHIFT                        27
> +#define VIV_FE_DRAW_PRIMITIVES_HEADER_OP_DRAW_PRIMITIVES       0x28000000
> +
> +#define VIV_FE_DRAW_PRIMITIVES_COMMAND                         0x00000004
> +#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK              0x000000ff
> +#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT             0
> +#define VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE(x)                 (((x) << VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_PRIMITIVES_COMMAND_TYPE__MASK)
> +
> +#define VIV_FE_DRAW_PRIMITIVES_START                           0x00000008
> +
> +#define VIV_FE_DRAW_PRIMITIVES_COUNT                           0x0000000c
> +
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES                         0x00000000
> +
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER                  0x00000000
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__MASK         0xf8000000
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP__SHIFT                27
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_HEADER_OP_DRAW_INDEXED_PRIMITIVES       0x30000000
> +
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND                 0x00000004
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK      0x000000ff
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT     0
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE(x)         (((x) << VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__SHIFT) & VIV_FE_DRAW_INDEXED_PRIMITIVES_COMMAND_TYPE__MASK)
> +
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_START                   0x00000008
> +
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_COUNT                   0x0000000c
> +
> +#define VIV_FE_DRAW_INDEXED_PRIMITIVES_OFFSET                  0x00000010
> +
> +#define VIV_FE_WAIT                                            0x00000000
> +
> +#define VIV_FE_WAIT_HEADER                                     0x00000000
> +#define VIV_FE_WAIT_HEADER_DELAY__MASK                         0x0000ffff
> +#define VIV_FE_WAIT_HEADER_DELAY__SHIFT                                0
> +#define VIV_FE_WAIT_HEADER_DELAY(x)                            (((x) << VIV_FE_WAIT_HEADER_DELAY__SHIFT) & VIV_FE_WAIT_HEADER_DELAY__MASK)
> +#define VIV_FE_WAIT_HEADER_OP__MASK                            0xf8000000
> +#define VIV_FE_WAIT_HEADER_OP__SHIFT                           27
> +#define VIV_FE_WAIT_HEADER_OP_WAIT                             0x38000000
> +
> +#define VIV_FE_LINK                                            0x00000000
> +
> +#define VIV_FE_LINK_HEADER                                     0x00000000
> +#define VIV_FE_LINK_HEADER_PREFETCH__MASK                      0x0000ffff
> +#define VIV_FE_LINK_HEADER_PREFETCH__SHIFT                     0
> +#define VIV_FE_LINK_HEADER_PREFETCH(x)                         (((x) << VIV_FE_LINK_HEADER_PREFETCH__SHIFT) & VIV_FE_LINK_HEADER_PREFETCH__MASK)
> +#define VIV_FE_LINK_HEADER_OP__MASK                            0xf8000000
> +#define VIV_FE_LINK_HEADER_OP__SHIFT                           27
> +#define VIV_FE_LINK_HEADER_OP_LINK                             0x40000000
> +
> +#define VIV_FE_LINK_ADDRESS                                    0x00000004
> +
> +#define VIV_FE_STALL                                           0x00000000
> +
> +#define VIV_FE_STALL_HEADER                                    0x00000000
> +#define VIV_FE_STALL_HEADER_OP__MASK                           0xf8000000
> +#define VIV_FE_STALL_HEADER_OP__SHIFT                          27
> +#define VIV_FE_STALL_HEADER_OP_STALL                           0x48000000
> +
> +#define VIV_FE_STALL_TOKEN                                     0x00000004
> +#define VIV_FE_STALL_TOKEN_FROM__MASK                          0x0000001f
> +#define VIV_FE_STALL_TOKEN_FROM__SHIFT                         0
> +#define VIV_FE_STALL_TOKEN_FROM(x)                             (((x) << VIV_FE_STALL_TOKEN_FROM__SHIFT) & VIV_FE_STALL_TOKEN_FROM__MASK)
> +#define VIV_FE_STALL_TOKEN_TO__MASK                            0x00001f00
> +#define VIV_FE_STALL_TOKEN_TO__SHIFT                           8
> +#define VIV_FE_STALL_TOKEN_TO(x)                               (((x) << VIV_FE_STALL_TOKEN_TO__SHIFT) & VIV_FE_STALL_TOKEN_TO__MASK)
> +
> +#define VIV_FE_CALL                                            0x00000000
> +
> +#define VIV_FE_CALL_HEADER                                     0x00000000
> +#define VIV_FE_CALL_HEADER_PREFETCH__MASK                      0x0000ffff
> +#define VIV_FE_CALL_HEADER_PREFETCH__SHIFT                     0
> +#define VIV_FE_CALL_HEADER_PREFETCH(x)                         (((x) << VIV_FE_CALL_HEADER_PREFETCH__SHIFT) & VIV_FE_CALL_HEADER_PREFETCH__MASK)
> +#define VIV_FE_CALL_HEADER_OP__MASK                            0xf8000000
> +#define VIV_FE_CALL_HEADER_OP__SHIFT                           27
> +#define VIV_FE_CALL_HEADER_OP_CALL                             0x50000000
> +
> +#define VIV_FE_CALL_ADDRESS                                    0x00000004
> +
> +#define VIV_FE_CALL_RETURN_PREFETCH                            0x00000008
> +
> +#define VIV_FE_CALL_RETURN_ADDRESS                             0x0000000c
> +
> +#define VIV_FE_RETURN                                          0x00000000
> +
> +#define VIV_FE_RETURN_HEADER                                   0x00000000
> +#define VIV_FE_RETURN_HEADER_OP__MASK                          0xf8000000
> +#define VIV_FE_RETURN_HEADER_OP__SHIFT                         27
> +#define VIV_FE_RETURN_HEADER_OP_RETURN                         0x58000000
> +
> +#define VIV_FE_CHIP_SELECT                                     0x00000000
> +
> +#define VIV_FE_CHIP_SELECT_HEADER                              0x00000000
> +#define VIV_FE_CHIP_SELECT_HEADER_OP__MASK                     0xf8000000
> +#define VIV_FE_CHIP_SELECT_HEADER_OP__SHIFT                    27
> +#define VIV_FE_CHIP_SELECT_HEADER_OP_CHIP_SELECT               0x68000000
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP15                        0x00008000
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP14                        0x00004000
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP13                        0x00002000
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP12                        0x00001000
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP11                        0x00000800
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP10                        0x00000400
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP9                 0x00000200
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP8                 0x00000100
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP7                 0x00000080
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP6                 0x00000040
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP5                 0x00000020
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP4                 0x00000010
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP3                 0x00000008
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP2                 0x00000004
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP1                 0x00000002
> +#define VIV_FE_CHIP_SELECT_HEADER_ENABLE_CHIP0                 0x00000001
> +
> +
> +#endif /* CMDSTREAM_XML */
> diff --git a/drivers/staging/etnaviv/common.xml.h b/drivers/staging/etnaviv/common.xml.h
> new file mode 100644
> index 000000000000..9e585d51fb78
> --- /dev/null
> +++ b/drivers/staging/etnaviv/common.xml.h
> @@ -0,0 +1,249 @@
> +#ifndef COMMON_XML
> +#define COMMON_XML
> +
> +/* Autogenerated file, DO NOT EDIT manually!
> +
> +This file was generated by the rules-ng-ng headergen tool in this git repository:
> +http://0x04.net/cgit/index.cgi/rules-ng-ng
> +git clone git://0x04.net/rules-ng-ng
> +
> +The rules-ng-ng source files this header was generated from are:
> +- state_vg.xml (   5973 bytes, from 2015-03-25 11:26:01)
> +- common.xml   (  18437 bytes, from 2015-03-25 11:27:41)
> +
> +Copyright (C) 2015
> +*/
> +
> +
> +#define PIPE_ID_PIPE_3D                                                0x00000000
> +#define PIPE_ID_PIPE_2D                                                0x00000001
> +#define SYNC_RECIPIENT_FE                                      0x00000001
> +#define SYNC_RECIPIENT_RA                                      0x00000005
> +#define SYNC_RECIPIENT_PE                                      0x00000007
> +#define SYNC_RECIPIENT_DE                                      0x0000000b
> +#define SYNC_RECIPIENT_VG                                      0x0000000f
> +#define SYNC_RECIPIENT_TESSELATOR                              0x00000010
> +#define SYNC_RECIPIENT_VG2                                     0x00000011
> +#define SYNC_RECIPIENT_TESSELATOR2                             0x00000012
> +#define SYNC_RECIPIENT_VG3                                     0x00000013
> +#define SYNC_RECIPIENT_TESSELATOR3                             0x00000014
> +#define ENDIAN_MODE_NO_SWAP                                    0x00000000
> +#define ENDIAN_MODE_SWAP_16                                    0x00000001
> +#define ENDIAN_MODE_SWAP_32                                    0x00000002
> +#define chipModel_GC300                                                0x00000300
> +#define chipModel_GC320                                                0x00000320
> +#define chipModel_GC350                                                0x00000350
> +#define chipModel_GC355                                                0x00000355
> +#define chipModel_GC400                                                0x00000400
> +#define chipModel_GC410                                                0x00000410
> +#define chipModel_GC420                                                0x00000420
> +#define chipModel_GC450                                                0x00000450
> +#define chipModel_GC500                                                0x00000500
> +#define chipModel_GC530                                                0x00000530
> +#define chipModel_GC600                                                0x00000600
> +#define chipModel_GC700                                                0x00000700
> +#define chipModel_GC800                                                0x00000800
> +#define chipModel_GC860                                                0x00000860
> +#define chipModel_GC880                                                0x00000880
> +#define chipModel_GC1000                                       0x00001000
> +#define chipModel_GC2000                                       0x00002000
> +#define chipModel_GC2100                                       0x00002100
> +#define chipModel_GC4000                                       0x00004000
> +#define RGBA_BITS_R                                            0x00000001
> +#define RGBA_BITS_G                                            0x00000002
> +#define RGBA_BITS_B                                            0x00000004
> +#define RGBA_BITS_A                                            0x00000008
> +#define chipFeatures_FAST_CLEAR                                        0x00000001
> +#define chipFeatures_SPECIAL_ANTI_ALIASING                     0x00000002
> +#define chipFeatures_PIPE_3D                                   0x00000004
> +#define chipFeatures_DXT_TEXTURE_COMPRESSION                   0x00000008
> +#define chipFeatures_DEBUG_MODE                                        0x00000010
> +#define chipFeatures_Z_COMPRESSION                             0x00000020
> +#define chipFeatures_YUV420_SCALER                             0x00000040
> +#define chipFeatures_MSAA                                      0x00000080
> +#define chipFeatures_DC                                                0x00000100
> +#define chipFeatures_PIPE_2D                                   0x00000200
> +#define chipFeatures_ETC1_TEXTURE_COMPRESSION                  0x00000400
> +#define chipFeatures_FAST_SCALER                               0x00000800
> +#define chipFeatures_HIGH_DYNAMIC_RANGE                                0x00001000
> +#define chipFeatures_YUV420_TILER                              0x00002000
> +#define chipFeatures_MODULE_CG                                 0x00004000
> +#define chipFeatures_MIN_AREA                                  0x00008000
> +#define chipFeatures_NO_EARLY_Z                                        0x00010000
> +#define chipFeatures_NO_422_TEXTURE                            0x00020000
> +#define chipFeatures_BUFFER_INTERLEAVING                       0x00040000
> +#define chipFeatures_BYTE_WRITE_2D                             0x00080000
> +#define chipFeatures_NO_SCALER                                 0x00100000
> +#define chipFeatures_YUY2_AVERAGING                            0x00200000
> +#define chipFeatures_HALF_PE_CACHE                             0x00400000
> +#define chipFeatures_HALF_TX_CACHE                             0x00800000
> +#define chipFeatures_YUY2_RENDER_TARGET                                0x01000000
> +#define chipFeatures_MEM32                                     0x02000000
> +#define chipFeatures_PIPE_VG                                   0x04000000
> +#define chipFeatures_VGTS                                      0x08000000
> +#define chipFeatures_FE20                                      0x10000000
> +#define chipFeatures_BYTE_WRITE_3D                             0x20000000
> +#define chipFeatures_RS_YUV_TARGET                             0x40000000
> +#define chipFeatures_32_BIT_INDICES                            0x80000000
> +#define chipMinorFeatures0_FLIP_Y                              0x00000001
> +#define chipMinorFeatures0_DUAL_RETURN_BUS                     0x00000002
> +#define chipMinorFeatures0_ENDIANNESS_CONFIG                   0x00000004
> +#define chipMinorFeatures0_TEXTURE_8K                          0x00000008
> +#define chipMinorFeatures0_CORRECT_TEXTURE_CONVERTER           0x00000010
> +#define chipMinorFeatures0_SPECIAL_MSAA_LOD                    0x00000020
> +#define chipMinorFeatures0_FAST_CLEAR_FLUSH                    0x00000040
> +#define chipMinorFeatures0_2DPE20                              0x00000080
> +#define chipMinorFeatures0_CORRECT_AUTO_DISABLE                        0x00000100
> +#define chipMinorFeatures0_RENDERTARGET_8K                     0x00000200
> +#define chipMinorFeatures0_2BITPERTILE                         0x00000400
> +#define chipMinorFeatures0_SEPARATE_TILE_STATUS_WHEN_INTERLEAVED       0x00000800
> +#define chipMinorFeatures0_SUPER_TILED                         0x00001000
> +#define chipMinorFeatures0_VG_20                               0x00002000
> +#define chipMinorFeatures0_TS_EXTENDED_COMMANDS                        0x00004000
> +#define chipMinorFeatures0_COMPRESSION_FIFO_FIXED              0x00008000
> +#define chipMinorFeatures0_HAS_SIGN_FLOOR_CEIL                 0x00010000
> +#define chipMinorFeatures0_VG_FILTER                           0x00020000
> +#define chipMinorFeatures0_VG_21                               0x00040000
> +#define chipMinorFeatures0_SHADER_HAS_W                                0x00080000
> +#define chipMinorFeatures0_HAS_SQRT_TRIG                       0x00100000
> +#define chipMinorFeatures0_MORE_MINOR_FEATURES                 0x00200000
> +#define chipMinorFeatures0_MC20                                        0x00400000
> +#define chipMinorFeatures0_MSAA_SIDEBAND                       0x00800000
> +#define chipMinorFeatures0_BUG_FIXES0                          0x01000000
> +#define chipMinorFeatures0_VAA                                 0x02000000
> +#define chipMinorFeatures0_BYPASS_IN_MSAA                      0x04000000
> +#define chipMinorFeatures0_HZ                                  0x08000000
> +#define chipMinorFeatures0_NEW_TEXTURE                         0x10000000
> +#define chipMinorFeatures0_2D_A8_TARGET                                0x20000000
> +#define chipMinorFeatures0_CORRECT_STENCIL                     0x40000000
> +#define chipMinorFeatures0_ENHANCE_VR                          0x80000000
> +#define chipMinorFeatures1_RSUV_SWIZZLE                                0x00000001
> +#define chipMinorFeatures1_V2_COMPRESSION                      0x00000002
> +#define chipMinorFeatures1_VG_DOUBLE_BUFFER                    0x00000004
> +#define chipMinorFeatures1_EXTRA_EVENT_STATES                  0x00000008
> +#define chipMinorFeatures1_NO_STRIPING_NEEDED                  0x00000010
> +#define chipMinorFeatures1_TEXTURE_STRIDE                      0x00000020
> +#define chipMinorFeatures1_BUG_FIXES3                          0x00000040
> +#define chipMinorFeatures1_AUTO_DISABLE                                0x00000080
> +#define chipMinorFeatures1_AUTO_RESTART_TS                     0x00000100
> +#define chipMinorFeatures1_DISABLE_PE_GATING                   0x00000200
> +#define chipMinorFeatures1_L2_WINDOWING                                0x00000400
> +#define chipMinorFeatures1_HALF_FLOAT                          0x00000800
> +#define chipMinorFeatures1_PIXEL_DITHER                                0x00001000
> +#define chipMinorFeatures1_TWO_STENCIL_REFERENCE               0x00002000
> +#define chipMinorFeatures1_EXTENDED_PIXEL_FORMAT               0x00004000
> +#define chipMinorFeatures1_CORRECT_MIN_MAX_DEPTH               0x00008000
> +#define chipMinorFeatures1_2D_DITHER                           0x00010000
> +#define chipMinorFeatures1_BUG_FIXES5                          0x00020000
> +#define chipMinorFeatures1_NEW_2D                              0x00040000
> +#define chipMinorFeatures1_NEW_FP                              0x00080000
> +#define chipMinorFeatures1_TEXTURE_HALIGN                      0x00100000
> +#define chipMinorFeatures1_NON_POWER_OF_TWO                    0x00200000
> +#define chipMinorFeatures1_LINEAR_TEXTURE_SUPPORT              0x00400000
> +#define chipMinorFeatures1_HALTI0                              0x00800000
> +#define chipMinorFeatures1_CORRECT_OVERFLOW_VG                 0x01000000
> +#define chipMinorFeatures1_NEGATIVE_LOG_FIX                    0x02000000
> +#define chipMinorFeatures1_RESOLVE_OFFSET                      0x04000000
> +#define chipMinorFeatures1_OK_TO_GATE_AXI_CLOCK                        0x08000000
> +#define chipMinorFeatures1_MMU_VERSION                         0x10000000
> +#define chipMinorFeatures1_WIDE_LINE                           0x20000000
> +#define chipMinorFeatures1_BUG_FIXES6                          0x40000000
> +#define chipMinorFeatures1_FC_FLUSH_STALL                      0x80000000
> +#define chipMinorFeatures2_LINE_LOOP                           0x00000001
> +#define chipMinorFeatures2_LOGIC_OP                            0x00000002
> +#define chipMinorFeatures2_UNK2                                        0x00000004
> +#define chipMinorFeatures2_SUPERTILED_TEXTURE                  0x00000008
> +#define chipMinorFeatures2_UNK4                                        0x00000010
> +#define chipMinorFeatures2_RECT_PRIMITIVE                      0x00000020
> +#define chipMinorFeatures2_COMPOSITION                         0x00000040
> +#define chipMinorFeatures2_CORRECT_AUTO_DISABLE_COUNT          0x00000080
> +#define chipMinorFeatures2_UNK8                                        0x00000100
> +#define chipMinorFeatures2_UNK9                                        0x00000200
> +#define chipMinorFeatures2_UNK10                               0x00000400
> +#define chipMinorFeatures2_SAMPLERBASE_16                      0x00000800
> +#define chipMinorFeatures2_UNK12                               0x00001000
> +#define chipMinorFeatures2_UNK13                               0x00002000
> +#define chipMinorFeatures2_UNK14                               0x00004000
> +#define chipMinorFeatures2_EXTRA_TEXTURE_STATE                 0x00008000
> +#define chipMinorFeatures2_FULL_DIRECTFB                       0x00010000
> +#define chipMinorFeatures2_2D_TILING                           0x00020000
> +#define chipMinorFeatures2_THREAD_WALKER_IN_PS                 0x00040000
> +#define chipMinorFeatures2_TILE_FILLER                         0x00080000
> +#define chipMinorFeatures2_UNK20                               0x00100000
> +#define chipMinorFeatures2_2D_MULTI_SOURCE_BLIT                        0x00200000
> +#define chipMinorFeatures2_UNK22                               0x00400000
> +#define chipMinorFeatures2_UNK23                               0x00800000
> +#define chipMinorFeatures2_UNK24                               0x01000000
> +#define chipMinorFeatures2_MIXED_STREAMS                       0x02000000
> +#define chipMinorFeatures2_2D_420_L2CACHE                      0x04000000
> +#define chipMinorFeatures2_UNK27                               0x08000000
> +#define chipMinorFeatures2_2D_NO_INDEX8_BRUSH                  0x10000000
> +#define chipMinorFeatures2_TEXTURE_TILED_READ                  0x20000000
> +#define chipMinorFeatures2_UNK30                               0x40000000
> +#define chipMinorFeatures2_UNK31                               0x80000000
> +#define chipMinorFeatures3_ROTATION_STALL_FIX                  0x00000001
> +#define chipMinorFeatures3_UNK1                                        0x00000002
> +#define chipMinorFeatures3_2D_MULTI_SOURCE_BLT_EX              0x00000004
> +#define chipMinorFeatures3_UNK3                                        0x00000008
> +#define chipMinorFeatures3_UNK4                                        0x00000010
> +#define chipMinorFeatures3_UNK5                                        0x00000020
> +#define chipMinorFeatures3_UNK6                                        0x00000040
> +#define chipMinorFeatures3_UNK7                                        0x00000080
> +#define chipMinorFeatures3_UNK8                                        0x00000100
> +#define chipMinorFeatures3_UNK9                                        0x00000200
> +#define chipMinorFeatures3_BUG_FIXES10                         0x00000400
> +#define chipMinorFeatures3_UNK11                               0x00000800
> +#define chipMinorFeatures3_BUG_FIXES11                         0x00001000
> +#define chipMinorFeatures3_UNK13                               0x00002000
> +#define chipMinorFeatures3_UNK14                               0x00004000
> +#define chipMinorFeatures3_UNK15                               0x00008000
> +#define chipMinorFeatures3_UNK16                               0x00010000
> +#define chipMinorFeatures3_UNK17                               0x00020000
> +#define chipMinorFeatures3_UNK18                               0x00040000
> +#define chipMinorFeatures3_UNK19                               0x00080000
> +#define chipMinorFeatures3_UNK20                               0x00100000
> +#define chipMinorFeatures3_UNK21                               0x00200000
> +#define chipMinorFeatures3_UNK22                               0x00400000
> +#define chipMinorFeatures3_UNK23                               0x00800000
> +#define chipMinorFeatures3_UNK24                               0x01000000
> +#define chipMinorFeatures3_UNK25                               0x02000000
> +#define chipMinorFeatures3_UNK26                               0x04000000
> +#define chipMinorFeatures3_UNK27                               0x08000000
> +#define chipMinorFeatures3_UNK28                               0x10000000
> +#define chipMinorFeatures3_UNK29                               0x20000000
> +#define chipMinorFeatures3_UNK30                               0x40000000
> +#define chipMinorFeatures3_UNK31                               0x80000000
> +#define chipMinorFeatures4_UNK0                                        0x00000001
> +#define chipMinorFeatures4_UNK1                                        0x00000002
> +#define chipMinorFeatures4_UNK2                                        0x00000004
> +#define chipMinorFeatures4_UNK3                                        0x00000008
> +#define chipMinorFeatures4_UNK4                                        0x00000010
> +#define chipMinorFeatures4_UNK5                                        0x00000020
> +#define chipMinorFeatures4_UNK6                                        0x00000040
> +#define chipMinorFeatures4_UNK7                                        0x00000080
> +#define chipMinorFeatures4_UNK8                                        0x00000100
> +#define chipMinorFeatures4_UNK9                                        0x00000200
> +#define chipMinorFeatures4_UNK10                               0x00000400
> +#define chipMinorFeatures4_UNK11                               0x00000800
> +#define chipMinorFeatures4_UNK12                               0x00001000
> +#define chipMinorFeatures4_UNK13                               0x00002000
> +#define chipMinorFeatures4_UNK14                               0x00004000
> +#define chipMinorFeatures4_UNK15                               0x00008000
> +#define chipMinorFeatures4_UNK16                               0x00010000
> +#define chipMinorFeatures4_UNK17                               0x00020000
> +#define chipMinorFeatures4_UNK18                               0x00040000
> +#define chipMinorFeatures4_UNK19                               0x00080000
> +#define chipMinorFeatures4_UNK20                               0x00100000
> +#define chipMinorFeatures4_UNK21                               0x00200000
> +#define chipMinorFeatures4_UNK22                               0x00400000
> +#define chipMinorFeatures4_UNK23                               0x00800000
> +#define chipMinorFeatures4_UNK24                               0x01000000
> +#define chipMinorFeatures4_UNK25                               0x02000000
> +#define chipMinorFeatures4_UNK26                               0x04000000
> +#define chipMinorFeatures4_UNK27                               0x08000000
> +#define chipMinorFeatures4_UNK28                               0x10000000
> +#define chipMinorFeatures4_UNK29                               0x20000000
> +#define chipMinorFeatures4_UNK30                               0x40000000
> +#define chipMinorFeatures4_UNK31                               0x80000000
> +
> +#endif /* COMMON_XML */
> diff --git a/drivers/staging/etnaviv/etnaviv_buffer.c b/drivers/staging/etnaviv/etnaviv_buffer.c
> new file mode 100644
> index 000000000000..586f84316f1a
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_buffer.c
> @@ -0,0 +1,271 @@
> +/*
> + * Copyright (C) 2014 Etnaviv Project
> + * Author: Christian Gmeiner <christian.gmeiner@xxxxxxxxx>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include "etnaviv_gpu.h"
> +#include "etnaviv_gem.h"
> +#include "etnaviv_mmu.h"
> +
> +#include "common.xml.h"
> +#include "state.xml.h"
> +#include "cmdstream.xml.h"
> +
> +/*
> + * Command Buffer helper:
> + */
> +
> +static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
> +{
> +       u32 *vaddr = (u32 *)buffer->vaddr;
> +
> +       BUG_ON(buffer->user_size >= buffer->size);
> +
> +       vaddr[buffer->user_size / 4] = data;
> +       buffer->user_size += 4;
> +}
> +
> +static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
> +       u32 reg, u32 value)
> +{
> +       u32 index = reg >> VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR;
> +
> +       buffer->user_size = ALIGN(buffer->user_size, 8);
> +
> +       /* write a register via cmd stream */
> +       OUT(buffer, VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
> +                   VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
> +                   VIV_FE_LOAD_STATE_HEADER_OFFSET(index));
> +       OUT(buffer, value);
> +}
> +
> +static inline void CMD_END(struct etnaviv_cmdbuf *buffer)
> +{
> +       buffer->user_size = ALIGN(buffer->user_size, 8);
> +
> +       OUT(buffer, VIV_FE_END_HEADER_OP_END);
> +}
> +
> +static inline void CMD_WAIT(struct etnaviv_cmdbuf *buffer)
> +{
> +       buffer->user_size = ALIGN(buffer->user_size, 8);
> +
> +       OUT(buffer, VIV_FE_WAIT_HEADER_OP_WAIT | 200);
> +}
> +
> +static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
> +       u16 prefetch, u32 address)
> +{
> +       buffer->user_size = ALIGN(buffer->user_size, 8);
> +
> +       OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
> +                   VIV_FE_LINK_HEADER_PREFETCH(prefetch));
> +       OUT(buffer, address);
> +}
> +
> +static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
> +       u32 from, u32 to)
> +{
> +       buffer->user_size = ALIGN(buffer->user_size, 8);
> +
> +       OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
> +       OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
> +}
> +
> +static void etnaviv_cmd_select_pipe(struct etnaviv_cmdbuf *buffer, u8 pipe)
> +{
> +       u32 flush;
> +       u32 stall;
> +
> +       if (pipe == ETNA_PIPE_2D)
> +               flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR;
> +       else
> +               flush = VIVS_GL_FLUSH_CACHE_TEXTURE;
> +
> +       stall = VIVS_GL_SEMAPHORE_TOKEN_FROM(SYNC_RECIPIENT_FE) |
> +               VIVS_GL_SEMAPHORE_TOKEN_TO(SYNC_RECIPIENT_PE);
> +
> +       CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE, flush);
> +       CMD_LOAD_STATE(buffer, VIVS_GL_SEMAPHORE_TOKEN, stall);
> +
> +       CMD_STALL(buffer, SYNC_RECIPIENT_FE, SYNC_RECIPIENT_PE);
> +
> +       CMD_LOAD_STATE(buffer, VIVS_GL_PIPE_SELECT,
> +                      VIVS_GL_PIPE_SELECT_PIPE(pipe));
> +}
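
Worth spelling out for reviewers: the SEMAPHORE_TOKEN load state plus
the FE->PE STALL makes the front end wait until the pixel engine has
signalled the semaphore, i.e. until the cache flush above has actually
completed, before the PIPE_SELECT executes.  Switching pipes while the
PE still has work in flight would be undefined.
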
> +
> +static u32 gpu_va(struct etnaviv_gpu *gpu, struct etnaviv_cmdbuf *buf)
> +{
> +       return buf->paddr - gpu->memory_base;
> +}
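
gpu_va() is the linear-window translation: the GPU address is simply
the buffer's physical address minus the base of the window.  E.g. with
memory_base at 0x10000000, a command buffer at paddr 0x10001000 ends
up at GPU address 0x00001000.
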
> +
> +static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
> +       struct etnaviv_cmdbuf *buf, u32 off, u32 len)
> +{
> +       u32 size = buf->size;
> +       u32 *ptr = buf->vaddr + off;
> +
> +       dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
> +                       ptr, gpu_va(gpu, buf) + off, size - len * 4 - off);
> +
> +       print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
> +                       ptr, len * 4, 0);
> +}
> +
> +u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu)
> +{
> +       struct etnaviv_cmdbuf *buffer = gpu->buffer;
> +
> +       /* initialize buffer */
> +       buffer->user_size = 0;
> +
> +       CMD_WAIT(buffer);
> +       CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + buffer->user_size - 4);
> +
> +       return buffer->user_size / 8;
> +}
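
These two commands form the front end's idle loop: the FE spins on the
WAIT (delay operand 200) and the LINK jumps straight back to it.
etnaviv_buffer_queue() below breaks into this loop by patching the
WAIT into a LINK.  Ring contents after init, one 64-bit slot per
command:

	0x00: WAIT 200       <--+
	0x08: LINK 2, 0x00 -----+

The return value (user_size / 8) is the prefetch count, in 64-bit
units, to use when kicking off the FE at this buffer.
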
> +
> +void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
> +{
> +       struct etnaviv_cmdbuf *buffer = gpu->buffer;
> +
> +       /* Replace the last WAIT with an END */
> +       buffer->user_size -= 16;
> +
> +       CMD_END(buffer);
> +       mb();
> +}
> +
> +void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
> +       struct etnaviv_gem_submit *submit)
> +{
> +       struct etnaviv_cmdbuf *buffer = gpu->buffer;
> +       u32 *lw = buffer->vaddr + buffer->user_size - 16;
> +       u32 back, link_target, link_size, reserve_size, extra_size = 0;
> +
> +       if (drm_debug & DRM_UT_DRIVER)
> +               etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
> +
> +       /*
> +        * If we need to flush the MMU prior to submitting this buffer, we
> +        * will need to append an MMU flush load state, followed by a new
> +        * link to this buffer - a total of four additional words.  A
> +        * context (pipe) switch appends a further eight words.
> +        */
> +       if (gpu->mmu->need_flush || gpu->switch_context) {
> +               /* link command */
> +               extra_size += 2;
> +               /* flush command */
> +               if (gpu->mmu->need_flush)
> +                       extra_size += 2;
> +               /* pipe switch commands */
> +               if (gpu->switch_context)
> +                       extra_size += 8;
> +       }
> +
> +       reserve_size = (6 + extra_size) * 4;
> +
> +       /*
> +        * if we are going to completely overflow the buffer, we need to wrap.
> +        */
> +       if (buffer->user_size + reserve_size > buffer->size)
> +               buffer->user_size = 0;
> +
> +       /* save offset back into main buffer */
> +       back = buffer->user_size + reserve_size - 6 * 4;
> +       link_target = gpu_va(gpu, buffer) + buffer->user_size;
> +       link_size = 6;
> +
> +       /* Skip over any extra instructions */
> +       link_target += extra_size * sizeof(u32);
> +
> +       if (drm_debug & DRM_UT_DRIVER)
> +               pr_info("stream link to 0x%08x @ 0x%08x %p\n",
> +                       link_target, gpu_va(gpu, submit->cmdbuf),
> +                       submit->cmdbuf->vaddr);
> +
> +       /* jump back from cmd to main buffer */
> +       CMD_LINK(submit->cmdbuf, link_size, link_target);
> +
> +       link_target = gpu_va(gpu, submit->cmdbuf);
> +       link_size = submit->cmdbuf->size / 8;
> +
> +       if (drm_debug & DRM_UT_DRIVER) {
> +               print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
> +                              submit->cmdbuf->vaddr, submit->cmdbuf->size, 0);
> +
> +               pr_info("link op: %p\n", lw);
> +               pr_info("link addr: %p\n", lw + 1);
> +               pr_info("addr: 0x%08x\n", link_target);
> +               pr_info("back: 0x%08x\n", gpu_va(gpu, buffer) + back);
> +               pr_info("event: %d\n", event);
> +       }
> +
> +       if (gpu->mmu->need_flush || gpu->switch_context) {
> +               u32 new_target = gpu_va(gpu, buffer) + buffer->user_size;
> +
> +               if (gpu->mmu->need_flush) {
> +                       /* Add the MMU flush */
> +                       CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
> +                                      VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
> +                                      VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
> +                                      VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
> +                                      VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
> +                                      VIVS_GL_FLUSH_MMU_FLUSH_UNK4);
> +
> +                       gpu->mmu->need_flush = false;
> +               }
> +
> +               if (gpu->switch_context) {
> +                       etnaviv_cmd_select_pipe(buffer, submit->exec_state);
> +                       gpu->switch_context = false;
> +               }
> +
> +               /* And the link to the first buffer */
> +               CMD_LINK(buffer, link_size, link_target);
> +
> +               /* Update the link target to point to above instructions */
> +               link_target = new_target;
> +               link_size = extra_size;
> +       }
> +
> +       /* Save the event and buffer position of the new event trigger */
> +       gpu->event[event].fence = submit->fence;
> +
> +       /* take ownership of cmdbuffer */
> +       submit->cmdbuf->fence = submit->fence;
> +       list_add_tail(&submit->cmdbuf->gpu_active_list, &gpu->active_cmd_list);
> +       submit->cmdbuf = NULL;
> +
> +       /* trigger event */
> +       CMD_LOAD_STATE(buffer, VIVS_GL_EVENT, VIVS_GL_EVENT_EVENT_ID(event) |
> +                      VIVS_GL_EVENT_FROM_PE);
> +
> +       /* append WAIT/LINK to main buffer */
> +       CMD_WAIT(buffer);
> +       CMD_LINK(buffer, 2, gpu_va(gpu, buffer) + (buffer->user_size - 4));
> +
> +       /* Change WAIT into a LINK command; write the address first. */
> +       *(lw + 1) = link_target;
> +       mb();
> +       *(lw) = VIV_FE_LINK_HEADER_OP_LINK |
> +               VIV_FE_LINK_HEADER_PREFETCH(link_size);
> +       mb();
> +
> +       if (drm_debug & DRM_UT_DRIVER)
> +               etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
> +}
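
A note on the WAIT->LINK patching at the end: the FE may fetch the
WAIT slot at any moment, so the ordering of the two stores matters.
In pseudo-order:

	lw[1] = link_target;   /* 1. publish the branch target first  */
	mb();                  /* 2. target visible before the opcode */
	lw[0] = LINK header;   /* 3. arm the LINK with a single store */
	mb();                  /* 4. flush before the FE can prefetch */

This way the FE either still sees the WAIT (and keeps looping) or sees
a complete LINK; it can never follow a LINK with a stale target.
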
> diff --git a/drivers/staging/etnaviv/etnaviv_cmd_parser.c b/drivers/staging/etnaviv/etnaviv_cmd_parser.c
> new file mode 100644
> index 000000000000..5175d6eb3bdc
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_cmd_parser.c
> @@ -0,0 +1,119 @@
> +/*
> + * Copyright (C) 2015 Etnaviv Project
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/kernel.h>
> +
> +#include "etnaviv_gem.h"
> +#include "etnaviv_gpu.h"
> +
> +#include "cmdstream.xml.h"
> +
> +#define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT)
> +
> +static bool etnaviv_validate_load_state(struct etnaviv_gpu *gpu, u32 *buf,
> +       unsigned int state, unsigned int num)
> +{
> +       /* FIXME: validation is disabled by this early return for now;
> +        * drop it to enable the restricted-state checks below.
> +        */
> +       return true;
> +       if (0x1200 - state < num * 4)
> +               return false;
> +       if (0x1228 - state < num * 4)
> +               return false;
> +       if (0x1238 - state < num * 4)
> +               return false;
> +       if (0x1284 - state < num * 4)
> +               return false;
> +       if (0x128c - state < num * 4)
> +               return false;
> +       if (0x1304 - state < num * 4)
> +               return false;
> +       if (0x1310 - state < num * 4)
> +               return false;
> +       if (0x1318 - state < num * 4)
> +               return false;
> +       if (0x1280c - state < num * 4 + 0x0c)
> +               return false;
> +       if (0x128ac - state < num * 4 + 0x0c)
> +               return false;
> +       if (0x128cc - state < num * 4 + 0x0c)
> +               return false;
> +       if (0x1297c - state < num * 4 + 0x0c)
> +               return false;
> +       return true;
> +}
> +
> +static uint8_t cmd_length[32] = {
> +       [FE_OPCODE_DRAW_PRIMITIVES] = 4,
> +       [FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
> +       [FE_OPCODE_NOP] = 2,
> +       [FE_OPCODE_STALL] = 2,
> +};
> +
> +bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, void *stream,
> +                             unsigned int size)
> +{
> +       u32 *start = stream;
> +       u32 *buf = start;
> +       u32 *end = buf + size;
> +
> +       while (buf < end) {
> +               u32 cmd = *buf;
> +               unsigned int len, n, off;
> +               unsigned int op = cmd >> 27;
> +
> +               switch (op) {
> +               case FE_OPCODE_LOAD_STATE:
> +                       n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT);
> +                       len = ALIGN(1 + n, 2);
> +                       if (buf + len > end)
> +                               break;
> +
> +                       off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET);
> +                       if (!etnaviv_validate_load_state(gpu, buf + 1,
> +                                                        off * 4, n)) {
> +                               dev_warn(gpu->dev, "%s: load state covers restricted state (0x%x-0x%x) at offset %tu\n",
> +                                        __func__, off * 4, (off + n) * 4, buf - start);
> +                               return false;
> +                       }
> +                       break;
> +
> +               case FE_OPCODE_DRAW_2D:
> +                       n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT);
> +                       if (n == 0)
> +                               n = 256;
> +                       len = 2 + n * 2;
> +                       break;
> +
> +               default:
> +                       len = cmd_length[op];
> +                       if (len == 0) {
> +                               dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n",
> +                                       __func__, op, buf - start);
> +                               return false;
> +                       }
> +                       break;
> +               }
> +
> +               buf += len;
> +       }
> +
> +       if (buf > end) {
> +               dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n",
> +                       __func__, buf - start, size);
> +               return false;
> +       }
> +
> +       return true;
> +}
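
To make the permitted-opcode policy explicit: any opcode absent from
cmd_length[] and not handled in the switch gets len == 0 and is
rejected, so user streams may only contain LOAD_STATE, DRAW_2D,
DRAW_PRIMITIVES, DRAW_INDEXED_PRIMITIVES, NOP and STALL - WAIT, LINK,
END, CALL, RETURN and CHIP_SELECT remain kernel-only.  A hypothetical
self-test sketch, with gpu being any initialised instance (size is in
32-bit words, matching the `end = buf + size' arithmetic above):

	u32 ok[2]  = { VIV_FE_NOP_HEADER_OP_NOP, 0 };
	u32 bad[2] = { VIV_FE_LINK_HEADER_OP_LINK, 0 };

	WARN_ON(!etnaviv_cmd_validate_one(gpu, ok, ARRAY_SIZE(ok)));
	WARN_ON(etnaviv_cmd_validate_one(gpu, bad, ARRAY_SIZE(bad)));
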
> diff --git a/drivers/staging/etnaviv/etnaviv_drv.c b/drivers/staging/etnaviv/etnaviv_drv.c
> new file mode 100644
> index 000000000000..30f6e5d0c91d
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_drv.c
> @@ -0,0 +1,705 @@
> +/*
> + * Copyright (C) 2015 Etnaviv Project
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/component.h>
> +#include <linux/of_platform.h>
> +
> +#include "etnaviv_drv.h"
> +#include "etnaviv_gpu.h"
> +#include "etnaviv_gem.h"
> +#include "etnaviv_mmu.h"
> +#include "etnaviv_gem.h"
> +
> +#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
> +static bool reglog;
> +MODULE_PARM_DESC(reglog, "Enable register read/write logging");
> +module_param(reglog, bool, 0600);
> +#else
> +#define reglog 0
> +#endif
> +
> +void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
> +               const char *dbgname)
> +{
> +       struct resource *res;
> +       void __iomem *ptr;
> +
> +       if (name)
> +               res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
> +       else
> +               res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> +
> +       ptr = devm_ioremap_resource(&pdev->dev, res);
> +       if (IS_ERR(ptr)) {
> +               dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
> +                       PTR_ERR(ptr));
> +               return ptr;
> +       }
> +
> +       if (reglog)
> +               dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
> +                          dbgname, ptr, (size_t)resource_size(res));
> +
> +       return ptr;
> +}
> +
> +void etnaviv_writel(u32 data, void __iomem *addr)
> +{
> +       if (reglog)
> +               printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
> +
> +       writel(data, addr);
> +}
> +
> +u32 etnaviv_readl(const void __iomem *addr)
> +{
> +       u32 val = readl(addr);
> +
> +       if (reglog)
> +               printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
> +
> +       return val;
> +}
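
For anyone wanting the register log: with
CONFIG_DRM_ETNAVIV_REGISTER_LOGGING=y this is runtime-switchable, e.g.

	modprobe etnaviv reglog=y

or etnaviv.reglog=y on the kernel command line for a built-in driver;
the resulting IO:R/IO:W lines are what the demsm tool mentioned in the
Kconfig help consumes.
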
> +
> +/*
> + * DRM operations:
> + */
> +
> +static int etnaviv_unload(struct drm_device *dev)
> +{
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +
> +       flush_workqueue(priv->wq);
> +       destroy_workqueue(priv->wq);
> +
> +       component_unbind_all(dev->dev, dev);
> +
> +       dev->dev_private = NULL;
> +
> +       kfree(priv);
> +
> +       return 0;
> +}
> +
> +static void load_gpu(struct drm_device *dev)
> +{
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       unsigned int i;
> +
> +       for (i = 0; i < ETNA_MAX_PIPES; i++) {
> +               struct etnaviv_gpu *g = priv->gpu[i];
> +
> +               if (g) {
> +                       int ret;
> +
> +                       ret = etnaviv_gpu_init(g);
> +                       if (ret) {
> +                               dev_err(g->dev, "hw init failed: %d\n", ret);
> +                               priv->gpu[i] = NULL;
> +                       }
> +               }
> +       }
> +}
> +
> +static int etnaviv_load(struct drm_device *dev, unsigned long flags)
> +{
> +       struct platform_device *pdev = dev->platformdev;
> +       struct etnaviv_drm_private *priv;
> +       int err;
> +
> +       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
> +       if (!priv)
> +               return -ENOMEM;
> +
> +       dev->dev_private = priv;
> +
> +       priv->wq = alloc_ordered_workqueue("etnaviv", 0);
> +       if (!priv->wq) {
> +               err = -ENOMEM;
> +               goto err_wq;
> +       }
> +
> +       INIT_LIST_HEAD(&priv->inactive_list);
> +       priv->num_gpus = 0;
> +
> +       platform_set_drvdata(pdev, dev);
> +
> +       err = component_bind_all(dev->dev, dev);
> +       if (err < 0)
> +               goto err_bind;
> +
> +       load_gpu(dev);
> +
> +       return 0;
> +
> +err_bind:
> +       flush_workqueue(priv->wq);
> +       destroy_workqueue(priv->wq);
> +err_wq:
> +       kfree(priv);
> +       return err;
> +}
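
One small thing in the error paths: dev->dev_private is assigned before
the workqueue allocation, but never cleared again when priv is freed, so
it is left pointing at freed memory.  Probably harmless today, but cheap
to fix, e.g.:

        err_bind:
                flush_workqueue(priv->wq);
                destroy_workqueue(priv->wq);
        err_wq:
                dev->dev_private = NULL;
                kfree(priv);
                return err;
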
> +
> +static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
> +{
> +       struct etnaviv_file_private *ctx;
> +
> +       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
> +       if (!ctx)
> +               return -ENOMEM;
> +
> +       file->driver_priv = ctx;
> +
> +       return 0;
> +}
> +
> +static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
> +{
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       struct etnaviv_file_private *ctx = file->driver_priv;
> +       unsigned int i;
> +
> +       mutex_lock(&dev->struct_mutex);
> +       for (i = 0; i < ETNA_MAX_PIPES; i++) {
> +               struct etnaviv_gpu *gpu = priv->gpu[i];
> +
> +               if (gpu && gpu->lastctx == ctx)
> +                       gpu->lastctx = NULL;
> +       }
> +       mutex_unlock(&dev->struct_mutex);
> +
> +       kfree(ctx);
> +}
> +
> +/*
> + * DRM debugfs:
> + */
> +
> +#ifdef CONFIG_DEBUG_FS
> +static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
> +{
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       struct etnaviv_gpu *gpu;
> +       unsigned int i;
> +
> +       for (i = 0; i < ETNA_MAX_PIPES; i++) {
> +               gpu = priv->gpu[i];
> +               if (gpu) {
> +                       seq_printf(m, "Active Objects (%s):\n",
> +                                  dev_name(gpu->dev));
> +                       etnaviv_gem_describe_objects(&gpu->active_list, m);
> +               }
> +       }
> +
> +       seq_puts(m, "Inactive Objects:\n");
> +       etnaviv_gem_describe_objects(&priv->inactive_list, m);
> +
> +       return 0;
> +}
> +
> +static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
> +{
> +       return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
> +}
> +
> +static int etnaviv_mmu_show(struct drm_device *dev, struct seq_file *m)
> +{
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       struct etnaviv_gpu *gpu;
> +       unsigned int i;
> +
> +       for (i = 0; i < ETNA_MAX_PIPES; i++) {
> +               gpu = priv->gpu[i];
> +               if (gpu) {
> +                       seq_printf(m, "Active Objects (%s):\n",
> +                                  dev_name(gpu->dev));
> +                       drm_mm_dump_table(m, &gpu->mmu->mm);
> +               }
> +       }
> +       return 0;
> +}
> +
> +static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
> +{
> +       struct etnaviv_cmdbuf *buf = gpu->buffer;
> +       u32 size = buf->size;
> +       u32 *ptr = buf->vaddr;
> +       u32 i;
> +
> +       seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
> +                       buf->vaddr, (u64)buf->paddr, size - buf->user_size);
> +
> +       for (i = 0; i < size / 4; i++) {
> +               if (i && !(i % 4))
> +                       seq_puts(m, "\n");
> +               if (i % 4 == 0)
> +                       seq_printf(m, "\t0x%p: ", ptr + i);
> +               seq_printf(m, "%08x ", *(ptr + i));
> +       }
> +       seq_puts(m, "\n");
> +}
> +
> +static int etnaviv_ring_show(struct drm_device *dev, struct seq_file *m)
> +{
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       struct etnaviv_gpu *gpu;
> +       unsigned int i;
> +
> +       for (i = 0; i < ETNA_MAX_PIPES; i++) {
> +               gpu = priv->gpu[i];
> +               if (gpu) {
> +                       seq_printf(m, "Ring Buffer (%s): ",
> +                                  dev_name(gpu->dev));
> +                       etnaviv_buffer_dump(gpu, m);
> +               }
> +       }
> +       return 0;
> +}
> +
> +static int show_locked(struct seq_file *m, void *arg)
> +{
> +       struct drm_info_node *node = (struct drm_info_node *) m->private;
> +       struct drm_device *dev = node->minor->dev;
> +       int (*show)(struct drm_device *dev, struct seq_file *m) =
> +                       node->info_ent->data;
> +       int ret;
> +
> +       ret = mutex_lock_interruptible(&dev->struct_mutex);
> +       if (ret)
> +               return ret;
> +
> +       ret = show(dev, m);
> +
> +       mutex_unlock(&dev->struct_mutex);
> +
> +       return ret;
> +}
> +
> +static int show_each_gpu(struct seq_file *m, void *arg)
> +{
> +       struct drm_info_node *node = (struct drm_info_node *) m->private;
> +       struct drm_device *dev = node->minor->dev;
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       struct etnaviv_gpu *gpu;
> +       int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
> +                       node->info_ent->data;
> +       unsigned int i;
> +       int ret = 0;
> +
> +       for (i = 0; i < ETNA_MAX_PIPES; i++) {
> +               gpu = priv->gpu[i];
> +               if (!gpu)
> +                       continue;
> +
> +               ret = show(gpu, m);
> +               if (ret < 0)
> +                       break;
> +       }
> +
> +       return ret;
> +}
> +
> +static struct drm_info_list etnaviv_debugfs_list[] = {
> +               {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
> +               {"gem", show_locked, 0, etnaviv_gem_show},
> +               { "mm", show_locked, 0, etnaviv_mm_show },
> +               {"mmu", show_locked, 0, etnaviv_mmu_show},
> +               {"ring", show_locked, 0, etnaviv_ring_show},
> +};
> +
> +static int etnaviv_debugfs_init(struct drm_minor *minor)
> +{
> +       struct drm_device *dev = minor->dev;
> +       int ret;
> +
> +       ret = drm_debugfs_create_files(etnaviv_debugfs_list,
> +                       ARRAY_SIZE(etnaviv_debugfs_list),
> +                       minor->debugfs_root, minor);
> +
> +       if (ret) {
> +               dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
> +               return ret;
> +       }
> +
> +       return 0;
> +}
> +
> +static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
> +{
> +       drm_debugfs_remove_files(etnaviv_debugfs_list,
> +                       ARRAY_SIZE(etnaviv_debugfs_list), minor);
> +}
> +#endif
> +
> +/*
> + * DRM ioctls:
> + */
> +
> +static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
> +               struct drm_file *file)
> +{
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       struct drm_etnaviv_param *args = data;
> +       struct etnaviv_gpu *gpu;
> +
> +       if (args->pipe >= ETNA_MAX_PIPES)
> +               return -EINVAL;
> +
> +       gpu = priv->gpu[args->pipe];
> +       if (!gpu)
> +               return -ENXIO;
> +
> +       return etnaviv_gpu_get_param(gpu, args->param, &args->value);
> +}
> +
> +static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
> +               struct drm_file *file)
> +{
> +       struct drm_etnaviv_gem_new *args = data;
> +
> +       return etnaviv_gem_new_handle(dev, file, args->size,
> +                       args->flags, &args->handle);
> +}
> +
> +#define TS(t) ((struct timespec){ \
> +       .tv_sec = (t).tv_sec, \
> +       .tv_nsec = (t).tv_nsec \
> +})
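
For anyone reading along: &TS(...) below takes the address of a C99
compound literal.  That is fine here because the literal has automatic
storage until the end of the enclosing block, not just the full
expression, so passing the pointer down the call chain is safe as long
as no callee stores it.  In other words, it behaves like:

        struct timespec ts = TS(args->timeout); /* lives to end of block */
        etnaviv_gem_cpu_prep(obj, args->op, &ts);
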
> +
> +static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
> +               struct drm_file *file)
> +{
> +       struct drm_etnaviv_gem_cpu_prep *args = data;
> +       struct drm_gem_object *obj;
> +       int ret;
> +
> +       obj = drm_gem_object_lookup(dev, file, args->handle);
> +       if (!obj)
> +               return -ENOENT;
> +
> +       ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
> +
> +       drm_gem_object_unreference_unlocked(obj);
> +
> +       return ret;
> +}
> +
> +static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
> +               struct drm_file *file)
> +{
> +       struct drm_etnaviv_gem_cpu_fini *args = data;
> +       struct drm_gem_object *obj;
> +       int ret;
> +
> +       obj = drm_gem_object_lookup(dev, file, args->handle);
> +       if (!obj)
> +               return -ENOENT;
> +
> +       ret = etnaviv_gem_cpu_fini(obj);
> +
> +       drm_gem_object_unreference_unlocked(obj);
> +
> +       return ret;
> +}
> +
> +static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
> +               struct drm_file *file)
> +{
> +       struct drm_etnaviv_gem_info *args = data;
> +       struct drm_gem_object *obj;
> +       int ret;
> +
> +       if (args->pad)
> +               return -EINVAL;
> +
> +       obj = drm_gem_object_lookup(dev, file, args->handle);
> +       if (!obj)
> +               return -ENOENT;
> +
> +       ret = mutex_lock_interruptible(&dev->struct_mutex);
> +       if (ret == 0) {
> +               ret = etnaviv_gem_mmap_offset(obj, &args->offset);
> +
> +               mutex_unlock(&dev->struct_mutex);
> +       }
> +
> +       drm_gem_object_unreference_unlocked(obj);
> +
> +       return ret;
> +}
> +
> +static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
> +               struct drm_file *file)
> +{
> +       struct drm_etnaviv_wait_fence *args = data;
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       struct etnaviv_gpu *gpu;
> +
> +       if (args->pipe >= ETNA_MAX_PIPES)
> +               return -EINVAL;
> +
> +       gpu = priv->gpu[args->pipe];
> +       if (!gpu)
> +               return -ENXIO;
> +
> +       return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
> +                                                   &TS(args->timeout));
> +}
> +
> +static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
> +       struct drm_file *file)
> +{
> +       struct drm_etnaviv_gem_userptr *args = data;
> +       int access;
> +
> +       if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
> +           args->flags == 0)
> +               return -EINVAL;
> +
> +       if (offset_in_page(args->user_ptr | args->user_size) ||
> +           (uintptr_t)args->user_ptr != args->user_ptr ||
> +           (u32)args->user_size != args->user_size)
> +               return -EINVAL;
> +
> +       if (args->flags & ETNA_USERPTR_WRITE)
> +               access = VERIFY_WRITE;
> +       else
> +               access = VERIFY_READ;
> +
> +       if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
> +                      args->user_size))
> +               return -EFAULT;
> +
> +       return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
> +                                      args->user_size, args->flags,
> +                                      &args->handle);
> +}
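
The offset_in_page(args->user_ptr | args->user_size) test is a little
dense: OR-ing first means the low bits of either value survive, so the
result is non-zero iff either the pointer or the size is not page
aligned.  Spelled out, it is equivalent to:

        if (offset_in_page(args->user_ptr) ||
            offset_in_page(args->user_size))
                return -EINVAL;

Might be worth a comment so the next reader doesn't have to decode it.
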
> +
> +static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
> +       struct drm_file *file)
> +{
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       struct drm_etnaviv_gem_wait *args = data;
> +       struct drm_gem_object *obj;
> +       struct etnaviv_gpu *gpu;
> +       int ret;
> +
> +       if (args->pipe >= ETNA_MAX_PIPES)
> +               return -EINVAL;
> +
> +       gpu = priv->gpu[args->pipe];
> +       if (!gpu)
> +               return -ENXIO;
> +
> +       obj = drm_gem_object_lookup(dev, file, args->handle);
> +       if (!obj)
> +               return -ENOENT;
> +
> +       ret = etnaviv_gem_wait_bo(gpu, obj, &TS(args->timeout));
> +
> +       drm_gem_object_unreference_unlocked(obj);
> +
> +       return ret;
> +}
> +
> +static const struct drm_ioctl_desc etnaviv_ioctls[] = {
> +#define ETNA_IOCTL(n, func, flags) \
> +       DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
> +       ETNA_IOCTL(GET_PARAM,    get_param,    DRM_UNLOCKED|DRM_RENDER_ALLOW),
> +       ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
> +       ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
> +       ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
> +       ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
> +       ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
> +       ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
> +       ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_UNLOCKED|DRM_RENDER_ALLOW),
> +       ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
> +};
> +
> +static const struct vm_operations_struct vm_ops = {
> +       .fault = etnaviv_gem_fault,
> +       .open = drm_gem_vm_open,
> +       .close = drm_gem_vm_close,
> +};
> +
> +static const struct file_operations fops = {
> +       .owner              = THIS_MODULE,
> +       .open               = drm_open,
> +       .release            = drm_release,
> +       .unlocked_ioctl     = drm_ioctl,
> +#ifdef CONFIG_COMPAT
> +       .compat_ioctl       = drm_compat_ioctl,
> +#endif
> +       .poll               = drm_poll,
> +       .read               = drm_read,
> +       .llseek             = no_llseek,
> +       .mmap               = etnaviv_gem_mmap,
> +};
> +
> +static struct drm_driver etnaviv_drm_driver = {
> +       .driver_features    = DRIVER_HAVE_IRQ |
> +                               DRIVER_GEM |
> +                               DRIVER_PRIME |
> +                               DRIVER_RENDER,
> +       .load               = etnaviv_load,
> +       .unload             = etnaviv_unload,
> +       .open               = etnaviv_open,
> +       .preclose           = etnaviv_preclose,
> +       .set_busid          = drm_platform_set_busid,
> +       .gem_free_object    = etnaviv_gem_free_object,
> +       .gem_vm_ops         = &vm_ops,
> +       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
> +       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
> +       .gem_prime_export   = drm_gem_prime_export,
> +       .gem_prime_import   = drm_gem_prime_import,
> +       .gem_prime_pin      = etnaviv_gem_prime_pin,
> +       .gem_prime_unpin    = etnaviv_gem_prime_unpin,
> +       .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
> +       .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
> +       .gem_prime_vmap     = etnaviv_gem_prime_vmap,
> +       .gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
> +#ifdef CONFIG_DEBUG_FS
> +       .debugfs_init       = etnaviv_debugfs_init,
> +       .debugfs_cleanup    = etnaviv_debugfs_cleanup,
> +#endif
> +       .ioctls             = etnaviv_ioctls,
> +       .num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
> +       .fops               = &fops,
> +       .name               = "etnaviv",
> +       .desc               = "etnaviv DRM",
> +       .date               = "20150910",
> +       .major              = 1,
> +       .minor              = 0,
> +};
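
As far as I can tell DRIVER_HAVE_IRQ only affects the legacy
drm_irq_install() path, and etnaviv requests its own interrupt in the
GPU bind code, so the flag should be droppable:

        .driver_features    = DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER,
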
> +
> +/*
> + * Platform driver:
> + */
> +static int etnaviv_bind(struct device *dev)
> +{
> +       return drm_platform_init(&etnaviv_drm_driver, to_platform_device(dev));
> +}
> +
> +static void etnaviv_unbind(struct device *dev)
> +{
> +       drm_put_dev(dev_get_drvdata(dev));
> +}
> +
> +static const struct component_master_ops etnaviv_master_ops = {
> +       .bind = etnaviv_bind,
> +       .unbind = etnaviv_unbind,
> +};
> +
> +static int compare_of(struct device *dev, void *data)
> +{
> +       struct device_node *np = data;
> +
> +       return dev->of_node == np;
> +}
> +
> +static int compare_str(struct device *dev, void *data)
> +{
> +       return !strcmp(dev_name(dev), data);
> +}
> +
> +static int etnaviv_pdev_probe(struct platform_device *pdev)
> +{
> +       struct device *dev = &pdev->dev;
> +       struct device_node *node = dev->of_node;
> +       struct component_match *match = NULL;
> +
> +       dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
> +
> +       if (node) {
> +               struct device_node *core_node;
> +               int i;
> +
> +               for (i = 0; ; i++) {
> +                       core_node = of_parse_phandle(node, "cores", i);
> +                       if (!core_node)
> +                               break;
> +
> +                       component_match_add(&pdev->dev, &match, compare_of,
> +                                           core_node);
> +                       of_node_put(core_node);
> +               }
> +       } else if (dev->platform_data) {
> +               char **names = dev->platform_data;
> +               unsigned int i;
> +
> +               for (i = 0; names[i]; i++)
> +                       component_match_add(dev, &match, compare_str, names[i]);
> +       }
> +
> +       return component_master_add_with_match(dev, &etnaviv_master_ops, match);
> +}
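
dma_set_coherent_mask() can fail, and its return value is ignored here;
probe should probably propagate it (with a local added for it):

        int ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

        if (ret)
                return ret;
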
> +
> +static int etnaviv_pdev_remove(struct platform_device *pdev)
> +{
> +       component_master_del(&pdev->dev, &etnaviv_master_ops);
> +
> +       return 0;
> +}
> +
> +static const struct of_device_id dt_match[] = {
> +       { .compatible = "fsl,imx-gpu-subsystem" },
> +       { .compatible = "marvell,dove-gpu-subsystem" },
> +       {}
> +};
> +MODULE_DEVICE_TABLE(of, dt_match);
> +
> +static struct platform_driver etnaviv_platform_driver = {
> +       .probe      = etnaviv_pdev_probe,
> +       .remove     = etnaviv_pdev_remove,
> +       .driver     = {
> +               .owner  = THIS_MODULE,
> +               .name   = "etnaviv",
> +               .of_match_table = dt_match,
> +       },
> +};
> +
> +static int __init etnaviv_init(void)
> +{
> +       int ret;
> +
> +       ret = platform_driver_register(&etnaviv_gpu_driver);
> +       if (ret != 0)
> +               return ret;
> +
> +       ret = platform_driver_register(&etnaviv_platform_driver);
> +       if (ret != 0)
> +               platform_driver_unregister(&etnaviv_gpu_driver);
> +
> +       return ret;
> +}
> +module_init(etnaviv_init);
> +
> +static void __exit etnaviv_exit(void)
> +{
> +       platform_driver_unregister(&etnaviv_gpu_driver);
> +       platform_driver_unregister(&etnaviv_platform_driver);
> +}
> +module_exit(etnaviv_exit);
> +
> +MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@xxxxxxxxx>");
> +MODULE_AUTHOR("Russell King <rmk+kernel@xxxxxxxxxxxxxxxx>");
> +MODULE_AUTHOR("Lucas Stach <l.stach@xxxxxxxxxxxxxx>");
> +MODULE_DESCRIPTION("etnaviv DRM Driver");
> +MODULE_LICENSE("GPL v2");
> +MODULE_ALIAS("platform:etnaviv");
> diff --git a/drivers/staging/etnaviv/etnaviv_drv.h b/drivers/staging/etnaviv/etnaviv_drv.h
> new file mode 100644
> index 000000000000..719e33174e83
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_drv.h
> @@ -0,0 +1,138 @@
> +/*
> + * Copyright (C) 2015 Etnaviv Project
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __ETNAVIV_DRV_H__
> +#define __ETNAVIV_DRV_H__
> +
> +#include <linux/kernel.h>
> +#include <linux/clk.h>
> +#include <linux/cpufreq.h>
> +#include <linux/module.h>
> +#include <linux/platform_device.h>
> +#include <linux/pm.h>
> +#include <linux/pm_runtime.h>
> +#include <linux/slab.h>
> +#include <linux/list.h>
> +#include <linux/iommu.h>
> +#include <linux/types.h>
> +#include <linux/sizes.h>
> +
> +#include <drm/drmP.h>
> +#include <drm/drm_crtc_helper.h>
> +#include <drm/drm_fb_helper.h>
> +#include <drm/drm_gem.h>
> +#include <drm/etnaviv_drm.h>
> +
> +struct etnaviv_gpu;
> +struct etnaviv_mmu;
> +struct etnaviv_gem_object;
> +struct etnaviv_gem_submit;
> +
> +struct etnaviv_file_private {
> +       /* currently we don't do anything useful with this.. but when
> +        * per-context address spaces are supported we'd keep track of
> +        * the context's page-tables here.
> +        */
> +       int dummy;
> +};
> +
> +struct etnaviv_drm_private {
> +       int num_gpus;
> +       struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
> +
> +       u32 next_fence;
> +
> +       /* list of GEM objects: */
> +       struct list_head inactive_list;
> +
> +       struct workqueue_struct *wq;
> +};
> +
> +static inline void etnaviv_queue_work(struct drm_device *dev,
> +       struct work_struct *w)
> +{
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +
> +       queue_work(priv->wq, w);
> +}
> +
> +int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
> +               struct drm_file *file);
> +
> +int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
> +int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
> +int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
> +int etnaviv_gem_get_iova_locked(struct etnaviv_gpu *gpu,
> +       struct drm_gem_object *obj, u32 *iova);
> +int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
> +       int id, u32 *iova);
> +void etnaviv_gem_put_iova(struct drm_gem_object *obj);
> +struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
> +void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
> +void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
> +struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
> +       struct dma_buf_attachment *attach, struct sg_table *sg);
> +int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
> +void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
> +void *etnaviv_gem_vaddr_locked(struct drm_gem_object *obj);
> +void *etnaviv_gem_vaddr(struct drm_gem_object *obj);
> +dma_addr_t etnaviv_gem_paddr_locked(struct drm_gem_object *obj);
> +void etnaviv_gem_move_to_active(struct drm_gem_object *obj,
> +               struct etnaviv_gpu *gpu, u32 access, u32 fence);
> +void etnaviv_gem_move_to_inactive(struct drm_gem_object *obj);
> +int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
> +               struct timespec *timeout);
> +int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
> +void etnaviv_gem_free_object(struct drm_gem_object *obj);
> +int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
> +               u32 size, u32 flags, u32 *handle);
> +struct drm_gem_object *etnaviv_gem_new_locked(struct drm_device *dev,
> +               u32 size, u32 flags);
> +struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
> +               u32 size, u32 flags);
> +int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
> +       uintptr_t ptr, u32 size, u32 flags, u32 *handle);
> +u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
> +void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
> +void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
> +       struct etnaviv_gem_submit *submit);
> +bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu,
> +       void *stream, unsigned int size);
> +
> +#ifdef CONFIG_DEBUG_FS
> +void etnaviv_gem_describe_objects(struct list_head *list, struct seq_file *m);
> +#endif
> +
> +void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
> +               const char *dbgname);
> +void etnaviv_writel(u32 data, void __iomem *addr);
> +u32 etnaviv_readl(const void __iomem *addr);
> +
> +#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
> +#define VERB(fmt, ...) do { if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__); } while (0)
> +
> +/* returns true if fence a comes after fence b */
> +static inline bool fence_after(u32 a, u32 b)
> +{
> +       return (s32)(a - b) > 0;
> +}
> +
> +static inline bool fence_after_eq(u32 a, u32 b)
> +{
> +       return (s32)(a - b) >= 0;
> +}
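
The signed-difference comparison is the usual serial-number trick and
survives the 32-bit fence counter wrapping, e.g.:

        /* a is "4 fences after" b even though it is numerically smaller */
        fence_after(0x00000002, 0xfffffffe);    /* (s32)(a - b) == 4, true */
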
> +
> +#endif /* __ETNAVIV_DRV_H__ */
> diff --git a/drivers/staging/etnaviv/etnaviv_gem.c b/drivers/staging/etnaviv/etnaviv_gem.c
> new file mode 100644
> index 000000000000..1381c952c52f
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_gem.c
> @@ -0,0 +1,887 @@
> +/*
> + * Copyright (C) 2015 Etnaviv Project
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/spinlock.h>
> +#include <linux/shmem_fs.h>
> +
> +#include "etnaviv_drv.h"
> +#include "etnaviv_gem.h"
> +#include "etnaviv_gpu.h"
> +#include "etnaviv_mmu.h"
> +
> +static void etnaviv_gem_scatter_map(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +       struct drm_device *dev = etnaviv_obj->base.dev;
> +       struct sg_table *sgt = etnaviv_obj->sgt;
> +
> +       /*
> +        * For non-cached buffers, ensure the new pages are clean
> +        * because display controller, GPU, etc. are not coherent.
> +        */
> +       if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
> +               dma_map_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
> +}
> +
> +static void etnaviv_gem_scatterlist_unmap(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +       struct drm_device *dev = etnaviv_obj->base.dev;
> +       struct sg_table *sgt = etnaviv_obj->sgt;
> +
> +       /*
> +        * For non-cached buffers, ensure the new pages are clean
> +        * because display controller, GPU, etc. are not coherent:
> +        *
> +        * WARNING: The DMA API does not support concurrent CPU
> +        * and device access to the memory area.  With BIDIRECTIONAL,
> +        * we will clean the cache lines which overlap the region,
> +        * and invalidate all cache lines (partially) contained in
> +        * the region.
> +        *
> +        * If you have dirty data in the overlapping cache lines,
> +        * that will corrupt the GPU-written data.  If you have
> +        * written into the remainder of the region, this can
> +        * discard those writes.
> +        */
> +       if (etnaviv_obj->flags & ETNA_BO_CACHE_MASK)
> +               dma_unmap_sg(dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
> +}
> +
> +/* called with dev->struct_mutex held */
> +static int etnaviv_gem_shmem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +       struct drm_device *dev = etnaviv_obj->base.dev;
> +       struct page **p = drm_gem_get_pages(&etnaviv_obj->base);
> +
> +       if (IS_ERR(p)) {
> +               dev_err(dev->dev, "could not get pages: %ld\n", PTR_ERR(p));
> +               return PTR_ERR(p);
> +       }
> +
> +       etnaviv_obj->pages = p;
> +
> +       return 0;
> +}
> +
> +static void put_pages(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +       if (etnaviv_obj->sgt) {
> +               etnaviv_gem_scatterlist_unmap(etnaviv_obj);
> +               sg_free_table(etnaviv_obj->sgt);
> +               kfree(etnaviv_obj->sgt);
> +               etnaviv_obj->sgt = NULL;
> +       }
> +       if (etnaviv_obj->pages) {
> +               drm_gem_put_pages(&etnaviv_obj->base, etnaviv_obj->pages,
> +                                 true, false);
> +
> +               etnaviv_obj->pages = NULL;
> +       }
> +}
> +
> +struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +       int ret;
> +
> +       if (!etnaviv_obj->pages) {
> +               ret = etnaviv_obj->ops->get_pages(etnaviv_obj);
> +               if (ret < 0)
> +                       return ERR_PTR(ret);
> +       }
> +
> +       if (!etnaviv_obj->sgt) {
> +               struct drm_device *dev = etnaviv_obj->base.dev;
> +               int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
> +               struct sg_table *sgt;
> +
> +               sgt = drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
> +               if (IS_ERR(sgt)) {
> +                       dev_err(dev->dev, "failed to allocate sgt: %ld\n",
> +                               PTR_ERR(sgt));
> +                       return ERR_CAST(sgt);
> +               }
> +
> +               etnaviv_obj->sgt = sgt;
> +
> +               etnaviv_gem_scatter_map(etnaviv_obj);
> +       }
> +
> +       return etnaviv_obj->pages;
> +}
> +
> +void etnaviv_gem_put_pages(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +       /* when we start tracking the pin count, then do something here */
> +}
> +
> +static int etnaviv_gem_mmap_obj(struct drm_gem_object *obj,
> +               struct vm_area_struct *vma)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +       pgprot_t vm_page_prot;
> +
> +       vma->vm_flags &= ~VM_PFNMAP;
> +       vma->vm_flags |= VM_MIXEDMAP;
> +
> +       vm_page_prot = vm_get_page_prot(vma->vm_flags);
> +
> +       if (etnaviv_obj->flags & ETNA_BO_WC) {
> +               vma->vm_page_prot = pgprot_writecombine(vm_page_prot);
> +       } else if (etnaviv_obj->flags & ETNA_BO_UNCACHED) {
> +               vma->vm_page_prot = pgprot_noncached(vm_page_prot);
> +       } else {
> +               /*
> +                * Shunt off cached objs to shmem file so they have their own
> +                * address_space (so unmap_mapping_range does what we want,
> +                * in particular in the case of mmap'd dmabufs)
> +                */
> +               fput(vma->vm_file);
> +               get_file(obj->filp);
> +               vma->vm_pgoff = 0;
> +               vma->vm_file  = obj->filp;
> +
> +               vma->vm_page_prot = vm_page_prot;
> +       }
> +
> +       return 0;
> +}
> +
> +int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
> +{
> +       int ret;
> +
> +       ret = drm_gem_mmap(filp, vma);
> +       if (ret) {
> +               DBG("mmap failed: %d", ret);
> +               return ret;
> +       }
> +
> +       return etnaviv_gem_mmap_obj(vma->vm_private_data, vma);
> +}
> +
> +int etnaviv_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
> +{
> +       struct drm_gem_object *obj = vma->vm_private_data;
> +       struct drm_device *dev = obj->dev;
> +       struct page **pages, *page;
> +       pgoff_t pgoff;
> +       int ret;
> +
> +       /*
> +        * Make sure we don't parallel update on a fault, nor move or remove
> +        * something from beneath our feet.  Note that vm_insert_page() is
> +        * specifically coded to take care of this, so we don't have to.
> +        */
> +       ret = mutex_lock_interruptible(&dev->struct_mutex);
> +       if (ret)
> +               goto out;
> +
> +       /* make sure we have pages attached now */
> +       pages = etnaviv_gem_get_pages(to_etnaviv_bo(obj));
> +       mutex_unlock(&dev->struct_mutex);
> +
> +       if (IS_ERR(pages)) {
> +               ret = PTR_ERR(pages);
> +               goto out;
> +       }
> +
> +       /* We don't use vmf->pgoff since that has the fake offset: */
> +       pgoff = ((unsigned long)vmf->virtual_address -
> +                       vma->vm_start) >> PAGE_SHIFT;
> +
> +       page = pages[pgoff];
> +
> +       VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
> +            page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
> +
> +       ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
> +
> +out:
> +       switch (ret) {
> +       case -EAGAIN:
> +       case 0:
> +       case -ERESTARTSYS:
> +       case -EINTR:
> +       case -EBUSY:
> +               /*
> +                * EBUSY is ok: this just means that another thread
> +                * already did the job.
> +                */
> +               return VM_FAULT_NOPAGE;
> +       case -ENOMEM:
> +               return VM_FAULT_OOM;
> +       default:
> +               return VM_FAULT_SIGBUS;
> +       }
> +}
> +
> +/* get mmap offset - must be called under struct_mutex */
> +int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
> +{
> +       int ret;
> +
> +       /* Make it mmapable */
> +       ret = drm_gem_create_mmap_offset(obj);
> +       if (ret)
> +               dev_err(obj->dev->dev, "could not allocate mmap offset\n");
> +       else
> +               *offset = drm_vma_node_offset_addr(&obj->vma_node);
> +
> +       return ret;
> +}
> +
> +/* should be called under struct_mutex.. although it can be called
> + * from atomic context without struct_mutex to acquire an extra
> + * iova ref if you know one is already held.
> + *
> + * That means when I do eventually need to add support for unpinning
> + * the refcnt counter needs to be atomic_t.
> + */
> +int etnaviv_gem_get_iova_locked(struct etnaviv_gpu *gpu,
> +       struct drm_gem_object *obj, u32 *iova)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +       struct etnaviv_vram_mapping *mapping;
> +       int ret = 0;
> +
> +       mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
> +       if (!mapping) {
> +               struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
> +               if (IS_ERR(pages))
> +                       return PTR_ERR(pages);
> +               ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj,
> +                               gpu->memory_base, &mapping);
> +       }
> +
> +       if (!ret)
> +               *iova = mapping->iova;
> +
> +       return ret;
> +}
> +
> +int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
> +       int id, u32 *iova)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +       struct etnaviv_vram_mapping *mapping =
> +                       etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
> +       int ret;
> +
> +       /* this is safe right now because we don't unmap until the
> +        * bo is deleted:
> +        */
> +       if (mapping) {
> +               *iova = mapping->iova;
> +               return 0;
> +       }
> +
> +       mutex_lock(&obj->dev->struct_mutex);
> +       ret = etnaviv_gem_get_iova_locked(gpu, obj, iova);
> +       mutex_unlock(&obj->dev->struct_mutex);
> +
> +       return ret;
> +}
> +
> +void etnaviv_gem_put_iova(struct drm_gem_object *obj)
> +{
> +       /*
> +        * XXX TODO ..
> +        * NOTE: probably don't need a _locked() version.. we wouldn't
> +        * normally unmap here, but instead just mark that it could be
> +        * unmapped (if the iova refcnt drops to zero), but then later
> +        * if another _get_iova_locked() fails we can start unmapping
> +        * things that are no longer needed..
> +        */
> +}
> +
> +void *etnaviv_gem_vaddr_locked(struct drm_gem_object *obj)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +
> +       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
> +
> +       if (!etnaviv_obj->vaddr) {
> +               struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
> +
> +               if (IS_ERR(pages))
> +                       return ERR_CAST(pages);
> +
> +               etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
> +                               VM_MAP, pgprot_writecombine(PAGE_KERNEL));
> +       }
> +
> +       return etnaviv_obj->vaddr;
> +}
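
etnaviv_gem_vaddr_locked() mixes error conventions: it returns an
ERR_PTR() when getting the pages fails, but a plain NULL when vmap()
fails, so callers have to check for both.  Returning ERR_PTR
consistently would be friendlier, something like:

        etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
                        VM_MAP, pgprot_writecombine(PAGE_KERNEL));
        if (!etnaviv_obj->vaddr)
                return ERR_PTR(-ENOMEM);
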
> +
> +void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
> +{
> +       void *ret;
> +
> +       mutex_lock(&obj->dev->struct_mutex);
> +       ret = etnaviv_gem_vaddr_locked(obj);
> +       mutex_unlock(&obj->dev->struct_mutex);
> +
> +       return ret;
> +}
> +
> +dma_addr_t etnaviv_gem_paddr_locked(struct drm_gem_object *obj)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +
> +       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
> +
> +       return etnaviv_obj->paddr;
> +}
> +
> +void etnaviv_gem_move_to_active(struct drm_gem_object *obj,
> +       struct etnaviv_gpu *gpu, u32 access, u32 fence)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +
> +       etnaviv_obj->gpu = gpu;
> +
> +       if (access & ETNA_SUBMIT_BO_READ)
> +               etnaviv_obj->read_fence = fence;
> +       if (access & ETNA_SUBMIT_BO_WRITE)
> +               etnaviv_obj->write_fence = fence;
> +
> +       etnaviv_obj->access |= access;
> +
> +       list_del_init(&etnaviv_obj->mm_list);
> +       list_add_tail(&etnaviv_obj->mm_list, &gpu->active_list);
> +}
> +
> +void etnaviv_gem_move_to_inactive(struct drm_gem_object *obj)
> +{
> +       struct drm_device *dev = obj->dev;
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +
> +       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
> +
> +       etnaviv_obj->gpu = NULL;
> +       etnaviv_obj->read_fence = 0;
> +       etnaviv_obj->write_fence = 0;
> +       etnaviv_obj->access = 0;
> +       list_del_init(&etnaviv_obj->mm_list);
> +       list_add_tail(&etnaviv_obj->mm_list, &priv->inactive_list);
> +}
> +
> +static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
> +{
> +       if (op & ETNA_PREP_READ)
> +               return DMA_FROM_DEVICE;
> +       else if (op & ETNA_PREP_WRITE)
> +               return DMA_TO_DEVICE;
> +       else
> +               return DMA_BIDIRECTIONAL;
> +}
> +
> +int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
> +               struct timespec *timeout)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +       struct drm_device *dev = obj->dev;
> +       int ret = 0;
> +
> +       if (is_active(etnaviv_obj)) {
> +               struct etnaviv_gpu *gpu = etnaviv_obj->gpu;
> +               u32 fence = 0;
> +
> +               if (op & ETNA_PREP_READ)
> +                       fence = etnaviv_obj->write_fence;
> +               if (op & ETNA_PREP_WRITE)
> +                       fence = max(fence, etnaviv_obj->read_fence);
> +               if (op & ETNA_PREP_NOSYNC)
> +                       timeout = NULL;
> +
> +               ret = etnaviv_gpu_wait_fence_interruptible(gpu, fence, timeout);
> +       }
> +
> +       if (etnaviv_obj->flags & ETNA_BO_CACHED) {
> +               if (!etnaviv_obj->sgt) {
> +                       struct page **pages;
> +
> +                       mutex_lock(&dev->struct_mutex);
> +                       pages = etnaviv_gem_get_pages(etnaviv_obj);
> +                       mutex_unlock(&dev->struct_mutex);
> +                       if (IS_ERR(pages))
> +                               return PTR_ERR(pages);
> +               }
> +
> +               dma_sync_sg_for_cpu(dev->dev, etnaviv_obj->sgt->sgl,
> +                                   etnaviv_obj->sgt->nents,
> +                                   etnaviv_op_to_dma_dir(op));
> +               etnaviv_obj->last_cpu_prep_op = op;
> +       }
> +
> +       return ret;
> +}
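
A question on the fence selection above: a CPU read waits on
write_fence, and a CPU write additionally waits on read_fence, but a
WRITE-only prep never looks at write_fence, so write-after-write against
the GPU is only covered if userspace always passes READ|WRITE.  If that
isn't guaranteed, something like this might be needed (max3() is from
linux/kernel.h):

        if (op & ETNA_PREP_WRITE)
                fence = max3(fence, etnaviv_obj->read_fence,
                             etnaviv_obj->write_fence);
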
> +
> +int etnaviv_gem_cpu_fini(struct drm_gem_object *obj)
> +{
> +       struct drm_device *dev = obj->dev;
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +
> +       if (etnaviv_obj->flags & ETNA_BO_CACHED) {
> +               /* fini without a prep is almost certainly a userspace error */
> +               WARN_ON(etnaviv_obj->last_cpu_prep_op == 0);
> +               dma_sync_sg_for_device(dev->dev, etnaviv_obj->sgt->sgl,
> +                       etnaviv_obj->sgt->nents,
> +                       etnaviv_op_to_dma_dir(etnaviv_obj->last_cpu_prep_op));
> +               etnaviv_obj->last_cpu_prep_op = 0;
> +       }
> +
> +       return 0;
> +}
> +
> +int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
> +       struct timespec *timeout)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +
> +       return etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, timeout);
> +}
> +
> +#ifdef CONFIG_DEBUG_FS
> +static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
> +{
> +       struct drm_device *dev = obj->dev;
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +       unsigned long off = drm_vma_node_start(&obj->vma_node);
> +
> +       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
> +
> +       seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08lx %p %zd\n",
> +                       etnaviv_obj->flags, is_active(etnaviv_obj) ? 'A' : 'I',
> +                       etnaviv_obj->read_fence, etnaviv_obj->write_fence,
> +                       obj->name, obj->refcount.refcount.counter,
> +                       off, etnaviv_obj->vaddr, obj->size);
> +}
> +
> +void etnaviv_gem_describe_objects(struct list_head *list, struct seq_file *m)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj;
> +       int count = 0;
> +       size_t size = 0;
> +
> +       list_for_each_entry(etnaviv_obj, list, mm_list) {
> +               struct drm_gem_object *obj = &etnaviv_obj->base;
> +
> +               seq_puts(m, "   ");
> +               etnaviv_gem_describe(obj, m);
> +               count++;
> +               size += obj->size;
> +       }
> +
> +       seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
> +}
> +#endif
> +
> +static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +       if (etnaviv_obj->vaddr)
> +               vunmap(etnaviv_obj->vaddr);
> +       put_pages(etnaviv_obj);
> +}
> +
> +static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
> +       .get_pages = etnaviv_gem_shmem_get_pages,
> +       .release = etnaviv_gem_shmem_release,
> +};
> +
> +void etnaviv_gem_free_object(struct drm_gem_object *obj)
> +{
> +       struct drm_device *dev = obj->dev;
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +       struct etnaviv_vram_mapping *mapping, *tmp;
> +
> +       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
> +
> +       /* object should not be on active list: */
> +       WARN_ON(is_active(etnaviv_obj));
> +
> +       list_del(&etnaviv_obj->mm_list);
> +
> +       list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
> +                                obj_node)
> +               etnaviv_iommu_unmap_gem(mapping);
> +
> +       drm_gem_free_mmap_offset(obj);
> +       etnaviv_obj->ops->release(etnaviv_obj);
> +       reservation_object_fini(&etnaviv_obj->_resv);
> +       drm_gem_object_release(obj);
> +
> +       kfree(etnaviv_obj);
> +}
> +
> +int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
> +{
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +       int ret;
> +
> +       ret = mutex_lock_killable(&dev->struct_mutex);
> +       if (ret)
> +               return ret;
> +
> +       list_add_tail(&etnaviv_obj->mm_list, &priv->inactive_list);
> +       mutex_unlock(&dev->struct_mutex);
> +
> +       return 0;
> +}
> +
> +static int etnaviv_gem_new_impl(struct drm_device *dev,
> +               u32 size, u32 flags,
> +               struct drm_gem_object **obj)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj;
> +       unsigned int sz = sizeof(*etnaviv_obj);
> +       bool valid = true;
> +
> +       /* validate flags */
> +       switch (flags & ETNA_BO_CACHE_MASK) {
> +       case ETNA_BO_UNCACHED:
> +       case ETNA_BO_CACHED:
> +       case ETNA_BO_WC:
> +               break;
> +       default:
> +               valid = false;
> +       }
> +
> +       if (!valid) {
> +               dev_err(dev->dev, "invalid cache flag: %x\n",
> +                       (flags & ETNA_BO_CACHE_MASK));
> +               return -EINVAL;
> +       }
> +
> +       etnaviv_obj = kzalloc(sz, GFP_KERNEL);
> +       if (!etnaviv_obj)
> +               return -ENOMEM;
> +
> +       etnaviv_obj->flags = flags;
> +
> +       etnaviv_obj->resv = &etnaviv_obj->_resv;
> +       reservation_object_init(&etnaviv_obj->_resv);
> +
> +       INIT_LIST_HEAD(&etnaviv_obj->submit_entry);
> +       INIT_LIST_HEAD(&etnaviv_obj->mm_list);
> +       INIT_LIST_HEAD(&etnaviv_obj->vram_list);
> +
> +       *obj = &etnaviv_obj->base;
> +
> +       return 0;
> +}
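
Tiny nit on etnaviv_gem_new_impl(): the "valid" flag is only there to
carry the result out of the switch, and the size parameter is currently
unused in this helper.  Erroring out of the switch directly reads a bit
cleaner:

        switch (flags & ETNA_BO_CACHE_MASK) {
        case ETNA_BO_UNCACHED:
        case ETNA_BO_CACHED:
        case ETNA_BO_WC:
                break;
        default:
                dev_err(dev->dev, "invalid cache flag: %x\n",
                        flags & ETNA_BO_CACHE_MASK);
                return -EINVAL;
        }
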
> +
> +static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
> +               u32 size, u32 flags)
> +{
> +       struct drm_gem_object *obj = NULL;
> +       int ret;
> +
> +       size = PAGE_ALIGN(size);
> +
> +       ret = etnaviv_gem_new_impl(dev, size, flags, &obj);
> +       if (ret)
> +               goto fail;
> +
> +       to_etnaviv_bo(obj)->ops = &etnaviv_gem_shmem_ops;
> +       ret = drm_gem_object_init(dev, obj, size);
> +       if (ret == 0) {
> +               struct address_space *mapping;
> +
> +               /*
> +                * Our buffers are kept pinned, so allocating them
> +                * from the MOVABLE zone is a really bad idea, and
> +                * conflicts with CMA.  See the comments above new_inode()
> +                * for why this is required _and_ expected if you're
> +                * going to pin these pages.
> +                */
> +               mapping = file_inode(obj->filp)->i_mapping;
> +               mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
> +       }
> +
> +       if (ret)
> +               goto fail;
> +
> +       return obj;
> +
> +fail:
> +       if (obj)
> +               drm_gem_object_unreference_unlocked(obj);
> +
> +       return ERR_PTR(ret);
> +}
> +
> +/* convenience method to construct a GEM buffer object, and userspace handle */
> +int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
> +               u32 size, u32 flags, u32 *handle)
> +{
> +       struct drm_gem_object *obj;
> +       int ret;
> +
> +       obj = __etnaviv_gem_new(dev, size, flags);
> +       if (IS_ERR(obj))
> +               return PTR_ERR(obj);
> +
> +       ret = etnaviv_gem_obj_add(dev, obj);
> +       if (ret < 0) {
> +               drm_gem_object_unreference_unlocked(obj);
> +               return ret;
> +       }
> +
> +       ret = drm_gem_handle_create(file, obj, handle);
> +
> +       /* drop reference from allocate - handle holds it now */
> +       drm_gem_object_unreference_unlocked(obj);
> +
> +       return ret;
> +}
> +
> +struct drm_gem_object *etnaviv_gem_new(struct drm_device *dev,
> +               u32 size, u32 flags)
> +{
> +       struct drm_gem_object *obj;
> +       int ret;
> +
> +       obj = __etnaviv_gem_new(dev, size, flags);
> +       if (IS_ERR(obj))
> +               return obj;
> +
> +       ret = etnaviv_gem_obj_add(dev, obj);
> +       if (ret < 0) {
> +               drm_gem_object_unreference_unlocked(obj);
> +               return ERR_PTR(ret);
> +       }
> +
> +       return obj;
> +}
> +
> +int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
> +       struct etnaviv_gem_object **res)
> +{
> +       struct drm_gem_object *obj;
> +       int ret;
> +
> +       ret = etnaviv_gem_new_impl(dev, size, flags, &obj);
> +       if (ret)
> +               return ret;
> +
> +       drm_gem_private_object_init(dev, obj, size);
> +
> +       *res = to_etnaviv_bo(obj);
> +
> +       return 0;
> +}
> +
> +struct etnaviv_vram_mapping *
> +etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
> +                            struct etnaviv_iommu *mmu)
> +{
> +       struct etnaviv_vram_mapping *mapping;
> +
> +       list_for_each_entry(mapping, &obj->vram_list, obj_node) {
> +               if (mapping->mmu == mmu)
> +                       return mapping;
> +       }
> +
> +       return NULL;
> +}
> +
> +struct get_pages_work {
> +       struct work_struct work;
> +       struct mm_struct *mm;
> +       struct task_struct *task;
> +       struct etnaviv_gem_object *etnaviv_obj;
> +};
> +
> +static struct page **etnaviv_gem_userptr_do_get_pages(
> +       struct etnaviv_gem_object *etnaviv_obj, struct mm_struct *mm,
> +       struct task_struct *task)
> +{
> +       int ret = 0, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
> +       struct page **pvec;
> +       uintptr_t ptr;
> +
> +       pvec = drm_malloc_ab(npages, sizeof(struct page *));
> +       if (!pvec)
> +               return ERR_PTR(-ENOMEM);
> +
> +       pinned = 0;
> +       ptr = etnaviv_obj->userptr.ptr;
> +
> +       down_read(&mm->mmap_sem);
> +       while (pinned < npages) {
> +               ret = get_user_pages(task, mm, ptr, npages - pinned,
> +                                    !etnaviv_obj->userptr.ro, 0,
> +                                    pvec + pinned, NULL);
> +               if (ret < 0)
> +                       break;
> +
> +               ptr += ret * PAGE_SIZE;
> +               pinned += ret;
> +       }
> +       up_read(&mm->mmap_sem);
> +
> +       if (ret < 0) {
> +               release_pages(pvec, pinned, 0);
> +               drm_free_large(pvec);
> +               return ERR_PTR(ret);
> +       }
> +
> +       return pvec;
> +}
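
Can get_user_pages() legitimately return 0 here (nothing pinned, no
error)?  If it ever does, the pinning loop above never terminates.  I
may be missing a guarantee that rules this out, but treating 0 as a
fault looks safer:

                ret = get_user_pages(task, mm, ptr, npages - pinned,
                                     !etnaviv_obj->userptr.ro, 0,
                                     pvec + pinned, NULL);
                if (ret == 0)
                        ret = -EFAULT;
                if (ret < 0)
                        break;
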
> +
> +static void __etnaviv_gem_userptr_get_pages(struct work_struct *_work)
> +{
> +       struct get_pages_work *work = container_of(_work, typeof(*work), work);
> +       struct etnaviv_gem_object *etnaviv_obj = work->etnaviv_obj;
> +       struct drm_device *dev = etnaviv_obj->base.dev;
> +       struct page **pvec;
> +
> +       pvec = etnaviv_gem_userptr_do_get_pages(etnaviv_obj, work->mm, work->task);
> +
> +       mutex_lock(&dev->struct_mutex);
> +       if (IS_ERR(pvec)) {
> +               etnaviv_obj->userptr.work = ERR_CAST(pvec);
> +       } else {
> +               etnaviv_obj->userptr.work = NULL;
> +               etnaviv_obj->pages = pvec;
> +       }
> +
> +       drm_gem_object_unreference(&etnaviv_obj->base);
> +       mutex_unlock(&dev->struct_mutex);
> +
> +       mmput(work->mm);
> +       put_task_struct(work->task);
> +       kfree(work);
> +}
> +
> +static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +       struct page **pvec = NULL;
> +       struct get_pages_work *work;
> +       struct mm_struct *mm;
> +       int ret, pinned, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
> +
> +       if (etnaviv_obj->userptr.work) {
> +               if (IS_ERR(etnaviv_obj->userptr.work)) {
> +                       ret = PTR_ERR(etnaviv_obj->userptr.work);
> +                       etnaviv_obj->userptr.work = NULL;
> +               } else {
> +                       ret = -EAGAIN;
> +               }
> +               return ret;
> +       }
> +
> +       mm = get_task_mm(etnaviv_obj->userptr.task);
> +       pinned = 0;
> +       if (mm == current->mm) {
> +               pvec = drm_malloc_ab(npages, sizeof(struct page *));
> +               if (!pvec) {
> +                       mmput(mm);
> +                       return -ENOMEM;
> +               }
> +
> +               pinned = __get_user_pages_fast(etnaviv_obj->userptr.ptr, npages,
> +                                              !etnaviv_obj->userptr.ro, pvec);
> +               if (pinned < 0) {
> +                       drm_free_large(pvec);
> +                       mmput(mm);
> +                       return pinned;
> +               }
> +
> +               if (pinned == npages) {
> +                       etnaviv_obj->pages = pvec;
> +                       mmput(mm);
> +                       return 0;
> +               }
> +       }
> +
> +       release_pages(pvec, pinned, 0);
> +       drm_free_large(pvec);
> +
> +       work = kmalloc(sizeof(*work), GFP_KERNEL);
> +       if (!work) {
> +               mmput(mm);
> +               return -ENOMEM;
> +       }
> +
> +       get_task_struct(current);
> +       drm_gem_object_reference(&etnaviv_obj->base);
> +
> +       work->mm = mm;
> +       work->task = current;
> +       work->etnaviv_obj = etnaviv_obj;
> +
> +       etnaviv_obj->userptr.work = &work->work;
> +       INIT_WORK(&work->work, __etnaviv_gem_userptr_get_pages);
> +
> +       etnaviv_queue_work(etnaviv_obj->base.dev, &work->work);
> +
> +       return -EAGAIN;
> +}
> +
> +static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +       if (etnaviv_obj->sgt) {
> +               etnaviv_gem_scatterlist_unmap(etnaviv_obj);
> +               sg_free_table(etnaviv_obj->sgt);
> +               kfree(etnaviv_obj->sgt);
> +       }
> +       if (etnaviv_obj->pages) {
> +               int npages = etnaviv_obj->base.size >> PAGE_SHIFT;
> +
> +               release_pages(etnaviv_obj->pages, npages, 0);
> +               drm_free_large(etnaviv_obj->pages);
> +       }
> +       put_task_struct(etnaviv_obj->userptr.task);
> +}
> +
> +static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
> +       .get_pages = etnaviv_gem_userptr_get_pages,
> +       .release = etnaviv_gem_userptr_release,
> +};
> +
> +int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
> +       uintptr_t ptr, u32 size, u32 flags, u32 *handle)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj;
> +       int ret;
> +
> +       ret = etnaviv_gem_new_private(dev, size, ETNA_BO_CACHED, &etnaviv_obj);
> +       if (ret)
> +               return ret;
> +
> +       etnaviv_obj->ops = &etnaviv_gem_userptr_ops;
> +       etnaviv_obj->userptr.ptr = ptr;
> +       etnaviv_obj->userptr.task = current;
> +       etnaviv_obj->userptr.ro = !(flags & ETNA_USERPTR_WRITE);
> +       get_task_struct(current);
> +
> +       ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
> +       if (ret) {
> +               drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
> +               return ret;
> +       }
> +
> +       ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
> +
> +       /* drop reference from allocate - handle holds it now */
> +       drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
> +
> +       return ret;
> +}
> diff --git a/drivers/staging/etnaviv/etnaviv_gem.h b/drivers/staging/etnaviv/etnaviv_gem.h
> new file mode 100644
> index 000000000000..c991d12e7aed
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_gem.h
> @@ -0,0 +1,141 @@
> +/*
> + * Copyright (C) 2015 Etnaviv Project
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __ETNAVIV_GEM_H__
> +#define __ETNAVIV_GEM_H__
> +
> +#include <linux/reservation.h>
> +#include "etnaviv_drv.h"
> +
> +struct etnaviv_gem_ops;
> +struct etnaviv_gem_object;
> +
> +struct etnaviv_gem_userptr {
> +       uintptr_t ptr;
> +       struct task_struct *task;
> +       struct work_struct *work;
> +       bool ro;
> +};
> +
> +struct etnaviv_vram_mapping {
> +       struct list_head obj_node;
> +       struct list_head scan_node;
> +       struct etnaviv_gem_object *object;
> +       struct etnaviv_iommu *mmu;
> +       struct drm_mm_node vram_node;
> +       u32 iova;
> +};
> +
> +struct etnaviv_gem_object {
> +       struct drm_gem_object base;
> +       const struct etnaviv_gem_ops *ops;
> +
> +       u32 flags;
> +
> +       /* An object is either:
> +        *  inactive - on priv->inactive_list
> +        *  active   - on one of the gpu's active lists..  well, at
> +        *     least for now we don't have (I don't think) hw sync between
> +        *     2d and 3d on devices which have both, meaning we need to
> +        *     block on submit if a bo is already on the other ring
> +        */
> +       struct list_head mm_list;
> +       struct etnaviv_gpu *gpu;     /* non-null if active */
> +       u32 access;
> +       u32 read_fence, write_fence;
> +
> +       /* Transiently in the process of submit ioctl, objects associated
> +        * with the submit are on submit->bo_list.. this only lasts for
> +        * the duration of the ioctl, so one bo can never be on multiple
> +        * submit lists.
> +        */
> +       struct list_head submit_entry;
> +
> +       struct page **pages;
> +       struct sg_table *sgt;
> +       void *vaddr;
> +
> +       /* for ETNA_BO_CMDSTREAM */
> +       dma_addr_t paddr;
> +
> +       /* normally (resv == &_resv) except for imported bo's */
> +       struct reservation_object *resv;
> +       struct reservation_object _resv;
> +
> +       struct list_head vram_list;
> +
> +       /* for buffer manipulation during submit */
> +       bool is_ring_buffer;
> +       u32 offset;
> +
> +       /* cache maintenance */
> +       u32 last_cpu_prep_op;
> +
> +       struct etnaviv_gem_userptr userptr;
> +};
> +
> +static inline
> +struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
> +{
> +       return container_of(obj, struct etnaviv_gem_object, base);
> +}
> +
> +struct etnaviv_gem_ops {
> +       int (*get_pages)(struct etnaviv_gem_object *);
> +       void (*release)(struct etnaviv_gem_object *);
> +};
> +
> +static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +       return etnaviv_obj->gpu != NULL;
> +}
> +
> +#define MAX_CMDS 4
> +
> +/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
> + * associated with the cmdstream submission for synchronization (and
> + * make it easier to unwind when things go wrong, etc).  This only
> + * lasts for the duration of the submit-ioctl.
> + */
> +struct etnaviv_gem_submit {
> +       struct drm_device *dev;
> +       struct etnaviv_gpu *gpu;
> +       u32 exec_state;
> +       struct list_head bo_list;
> +       struct ww_acquire_ctx ticket;
> +       u32 fence;
> +       unsigned int nr_bos;
> +       struct etnaviv_cmdbuf *cmdbuf;
> +       struct {
> +               u32 flags;
> +               struct etnaviv_gem_object *obj;
> +               u32 iova;
> +       } bos[0];
> +};
> +
> +int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
> +       struct timespec *timeout);
> +struct etnaviv_vram_mapping *
> +etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
> +                            struct etnaviv_iommu *mmu);
> +int etnaviv_gem_new_private(struct drm_device *dev, size_t size, u32 flags,
> +       struct etnaviv_gem_object **res);
> +int etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj);
> +struct page **etnaviv_gem_get_pages(struct etnaviv_gem_object *obj);
> +void etnaviv_gem_put_pages(struct etnaviv_gem_object *obj);
> +
> +#endif /* __ETNAVIV_GEM_H__ */
> diff --git a/drivers/staging/etnaviv/etnaviv_gem_prime.c b/drivers/staging/etnaviv/etnaviv_gem_prime.c
> new file mode 100644
> index 000000000000..58c13ae7c345
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_gem_prime.c
> @@ -0,0 +1,121 @@
> +/*
> + * Copyright (C) 2013 Red Hat
> + * Author: Rob Clark <robdclark@xxxxxxxxx>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/dma-buf.h>
> +#include "etnaviv_drv.h"
> +#include "etnaviv_gem.h"
> +
> +struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
> +
> +       BUG_ON(!etnaviv_obj->sgt);  /* should have been pinned already! */
> +
> +       return etnaviv_obj->sgt;
> +}
> +
> +void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
> +{
> +       return etnaviv_gem_vaddr(obj);
> +}
> +
> +void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
> +{
> +       /* TODO msm_gem_vunmap() */
> +}
> +
> +int etnaviv_gem_prime_pin(struct drm_gem_object *obj)
> +{
> +       if (!obj->import_attach) {
> +               struct drm_device *dev = obj->dev;
> +
> +               mutex_lock(&dev->struct_mutex);
> +               etnaviv_gem_get_pages(to_etnaviv_bo(obj));
> +               mutex_unlock(&dev->struct_mutex);
> +       }
> +       return 0;
> +}
> +
> +void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
> +{
> +       if (!obj->import_attach) {
> +               struct drm_device *dev = obj->dev;
> +
> +               mutex_lock(&dev->struct_mutex);
> +               etnaviv_gem_put_pages(to_etnaviv_bo(obj));
> +               mutex_unlock(&dev->struct_mutex);
> +       }
> +}
> +
> +static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
> +{
> +       if (etnaviv_obj->vaddr)
> +               dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf,
> +                              etnaviv_obj->vaddr);
> +
> +       /* Don't drop the pages for imported dmabuf, as they are not
> +        * ours, just free the array we allocated:
> +        */
> +       if (etnaviv_obj->pages)
> +               drm_free_large(etnaviv_obj->pages);
> +
> +       drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
> +}
> +
> +static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
> +       /* .get_pages should never be called */
> +       .release = etnaviv_gem_prime_release,
> +};
> +
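> +/*
> + * Wrap an imported dma-buf in an etnaviv GEM object.  The exporter owns
> + * the backing pages; we only keep its sg_table and build a struct page
> + * array view of it so the rest of the driver can treat the object like
> + * a native one.
> + */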
> +struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
> +       struct dma_buf_attachment *attach, struct sg_table *sgt)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj;
> +       size_t size = PAGE_ALIGN(attach->dmabuf->size);
> +       int ret, npages;
> +
> +       ret = etnaviv_gem_new_private(dev, size, ETNA_BO_WC, &etnaviv_obj);
> +       if (ret < 0)
> +               return ERR_PTR(ret);
> +
> +       npages = size / PAGE_SIZE;
> +
> +       etnaviv_obj->ops = &etnaviv_gem_prime_ops;
> +       etnaviv_obj->sgt = sgt;
> +       etnaviv_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
> +       if (!etnaviv_obj->pages) {
> +               ret = -ENOMEM;
> +               goto fail;
> +       }
> +
> +       ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages,
> +                                              NULL, npages);
> +       if (ret)
> +               goto fail;
> +
> +       ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
> +       if (ret)
> +               goto fail;
> +
> +       return &etnaviv_obj->base;
> +
> +fail:
> +       drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
> +
> +       return ERR_PTR(ret);
> +}
> diff --git a/drivers/staging/etnaviv/etnaviv_gem_submit.c b/drivers/staging/etnaviv/etnaviv_gem_submit.c
> new file mode 100644
> index 000000000000..f886a3c66d30
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_gem_submit.c
> @@ -0,0 +1,421 @@
> +/*
> + * Copyright (C) 2015 Etnaviv Project
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include "etnaviv_drv.h"
> +#include "etnaviv_gpu.h"
> +#include "etnaviv_gem.h"
> +
> +/*
> + * Cmdstream submission:
> + */
> +
> +#define BO_INVALID_FLAGS ~(ETNA_SUBMIT_BO_READ | ETNA_SUBMIT_BO_WRITE)
> +/* make sure these don't conflict w/ ETNAVIV_SUBMIT_BO_x */
> +#define BO_LOCKED   0x4000
> +#define BO_PINNED   0x2000
> +
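> +/*
> + * Userspace passes pointers as u64 so the ioctl structs have the same
> + * layout on 32-bit and 64-bit kernels; the intermediate uintptr_t cast
> + * avoids a size-truncation warning when converting back to a pointer.
> + */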
> +static inline void __user *to_user_ptr(u64 address)
> +{
> +       return (void __user *)(uintptr_t)address;
> +}
> +
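> +/*
> + * Allocate the submit object together with its trailing bos[] array in
> + * one go.  The allocation is sized by the (userspace provided) bo count,
> + * so use __GFP_NOWARN | __GFP_NORETRY to fail gracefully rather than
> + * triggering the OOM killer on absurd requests.
> + */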
> +static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
> +               struct etnaviv_gpu *gpu, int nr)
> +{
> +       struct etnaviv_gem_submit *submit;
> +       int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
> +
> +       submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
> +       if (submit) {
> +               submit->dev = dev;
> +               submit->gpu = gpu;
> +
> +               /* initially, until copy_from_user() and bo lookup succeed: */
> +               submit->nr_bos = 0;
> +               submit->cmdbuf = NULL;
> +
> +               INIT_LIST_HEAD(&submit->bo_list);
> +               ww_acquire_init(&submit->ticket, &reservation_ww_class);
> +       }
> +
> +       return submit;
> +}
> +
> +static int submit_lookup_objects(struct etnaviv_gem_submit *submit,
> +       struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos,
> +       unsigned nr_bos)
> +{
> +       struct drm_etnaviv_gem_submit_bo *bo;
> +       unsigned i;
> +       int ret = 0;
> +
> +       spin_lock(&file->table_lock);
> +
> +       for (i = 0, bo = submit_bos; i < nr_bos; i++, bo++) {
> +               struct drm_gem_object *obj;
> +               struct etnaviv_gem_object *etnaviv_obj;
> +
> +               if (bo->flags & BO_INVALID_FLAGS) {
> +                       DRM_ERROR("invalid flags: %x\n", bo->flags);
> +                       ret = -EINVAL;
> +                       goto out_unlock;
> +               }
> +
> +               submit->bos[i].flags = bo->flags;
> +
> +               /* normally we'd use drm_gem_object_lookup(), but since the
> +                * whole bulk lookup happens under a single table_lock, just
> +                * hit object_idr directly:
> +                */
> +               obj = idr_find(&file->object_idr, bo->handle);
> +               if (!obj) {
> +                       DRM_ERROR("invalid handle %u at index %u\n",
> +                                 bo->handle, i);
> +                       ret = -EINVAL;
> +                       goto out_unlock;
> +               }
> +
> +               etnaviv_obj = to_etnaviv_bo(obj);
> +
> +               if (!list_empty(&etnaviv_obj->submit_entry)) {
> +                       DRM_ERROR("handle %u at index %u already on submit list\n",
> +                                 bo->handle, i);
> +                       ret = -EINVAL;
> +                       goto out_unlock;
> +               }
> +
> +               drm_gem_object_reference(obj);
> +
> +               submit->bos[i].obj = etnaviv_obj;
> +
> +               list_add_tail(&etnaviv_obj->submit_entry, &submit->bo_list);
> +       }
> +
> +out_unlock:
> +       submit->nr_bos = i;
> +       spin_unlock(&file->table_lock);
> +
> +       return ret;
> +}
> +
> +static void submit_unlock_unpin_bo(struct etnaviv_gem_submit *submit, int i)
> +{
> +       struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
> +
> +       if (submit->bos[i].flags & BO_PINNED)
> +               etnaviv_gem_put_iova(&etnaviv_obj->base);
> +
> +       if (submit->bos[i].flags & BO_LOCKED)
> +               ww_mutex_unlock(&etnaviv_obj->resv->lock);
> +
> +       submit->bos[i].iova = 0;
> +       submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
> +}
> +
> +/* This is where we make sure all the bo's are reserved and pinned: */
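> +/*
> + * Reservation objects are locked with wound/wait mutexes: if a lock
> + * attempt returns -EDEADLK we back off by dropping every lock we hold,
> + * take the contended lock in the slow path and then retry the whole
> + * loop, which guarantees forward progress without a global lock order.
> + */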
> +static int submit_validate_objects(struct etnaviv_gem_submit *submit)
> +{
> +       int contended, slow_locked = -1, i, ret = 0;
> +
> +retry:
> +       for (i = 0; i < submit->nr_bos; i++) {
> +               struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
> +               u32 iova;
> +
> +               if (slow_locked == i)
> +                       slow_locked = -1;
> +
> +               contended = i;
> +
> +               if (!(submit->bos[i].flags & BO_LOCKED)) {
> +                       ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
> +                                       &submit->ticket);
> +                       if (ret)
> +                               goto fail;
> +                       submit->bos[i].flags |= BO_LOCKED;
> +               }
> +
> +               /* if locking succeeded, pin bo: */
> +               ret = etnaviv_gem_get_iova_locked(submit->gpu,
> +                                                 &etnaviv_obj->base, &iova);
> +
> +               /* this would break the logic in the fail path.. there is no
> +                * reason for this to happen, but just to be on the safe side
> +                * let's notice if this starts happening in the future:
> +                */
> +               WARN_ON(ret == -EDEADLK);
> +
> +               if (ret)
> +                       goto fail;
> +
> +               submit->bos[i].flags |= BO_PINNED;
> +               submit->bos[i].iova = iova;
> +       }
> +
> +       ww_acquire_done(&submit->ticket);
> +
> +       return 0;
> +
> +fail:
> +       for (; i >= 0; i--)
> +               submit_unlock_unpin_bo(submit, i);
> +
> +       if (slow_locked > 0)
> +               submit_unlock_unpin_bo(submit, slow_locked);
> +
> +       if (ret == -EDEADLK) {
> +               struct etnaviv_gem_object *etnaviv_obj;
> +
> +               etnaviv_obj = submit->bos[contended].obj;
> +
> +               /* we lost out in a seqno race, lock and retry.. */
> +               ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
> +                               &submit->ticket);
> +               if (!ret) {
> +                       submit->bos[contended].flags |= BO_LOCKED;
> +                       slow_locked = contended;
> +                       goto retry;
> +               }
> +       }
> +
> +       return ret;
> +}
> +
> +static int submit_bo(struct etnaviv_gem_submit *submit, u32 idx,
> +               struct etnaviv_gem_object **obj, u32 *iova)
> +{
> +       if (idx >= submit->nr_bos) {
> +               DRM_ERROR("invalid buffer index: %u (out of %u)\n",
> +                               idx, submit->nr_bos);
> +               return -EINVAL;
> +       }
> +
> +       if (obj)
> +               *obj = submit->bos[idx].obj;
> +       if (iova)
> +               *iova = submit->bos[idx].iova;
> +
> +       return 0;
> +}
> +
> +/* process the reloc's and patch up the cmdstream as needed: */
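> +/*
> + * Each reloc names a dword offset into the cmdstream plus a (bo index,
> + * offset) pair; userspace cannot know GPU virtual addresses, so the
> + * kernel patches bo_iova + reloc_offset into the stream at that spot.
> + */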
> +static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
> +               u32 size, u32 nr_relocs, u64 relocs)
> +{
> +       u32 i, last_offset = 0;
> +       u32 *ptr = stream;
> +       int ret;
> +
> +       for (i = 0; i < nr_relocs; i++) {
> +               struct drm_etnaviv_gem_submit_reloc submit_reloc;
> +               struct etnaviv_gem_object *bobj;
> +               void __user *userptr =
> +                       to_user_ptr(relocs + (i * sizeof(submit_reloc)));
> +               u32 iova, off;
> +
> +               ret = copy_from_user(&submit_reloc, userptr,
> +                                    sizeof(submit_reloc));
> +               if (ret)
> +                       return -EFAULT;
> +
> +               if (submit_reloc.submit_offset % 4) {
> +                       DRM_ERROR("non-aligned reloc offset: %u\n",
> +                                       submit_reloc.submit_offset);
> +                       return -EINVAL;
> +               }
> +
> +               /* offset in dwords: */
> +               off = submit_reloc.submit_offset / 4;
> +
> +               if (off >= size || off < last_offset) {
> +                       DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
> +                       return -EINVAL;
> +               }
> +
> +               ret = submit_bo(submit, submit_reloc.reloc_idx, &bobj, &iova);
> +               if (ret)
> +                       return ret;
> +
> +               if (submit_reloc.reloc_offset >=
> +                   bobj->base.size - sizeof(*ptr)) {
> +                       DRM_ERROR("relocation %u outside object", i);
> +                       return -EINVAL;
> +               }
> +
> +               ptr[off] = iova + submit_reloc.reloc_offset;
> +
> +               last_offset = off;
> +       }
> +
> +       return 0;
> +}
> +
> +static void submit_cleanup(struct etnaviv_gem_submit *submit, bool fail)
> +{
> +       unsigned i;
> +
> +       for (i = 0; i < submit->nr_bos; i++) {
> +               struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
> +
> +               submit_unlock_unpin_bo(submit, i);
> +               list_del_init(&etnaviv_obj->submit_entry);
> +               drm_gem_object_unreference(&etnaviv_obj->base);
> +       }
> +
> +       if (submit->cmdbuf)
> +               etnaviv_gpu_cmdbuf_free(submit->cmdbuf);
> +
> +       ww_acquire_fini(&submit->ticket);
> +       kfree(submit);
> +}
> +
> +int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
> +               struct drm_file *file)
> +{
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       struct drm_etnaviv_gem_submit *args = data;
> +       struct etnaviv_file_private *ctx = file->driver_priv;
> +       struct drm_etnaviv_gem_submit_bo *bos;
> +       struct etnaviv_gem_submit *submit;
> +       struct etnaviv_cmdbuf *cmdbuf;
> +       struct etnaviv_gpu *gpu;
> +       void *stream;
> +       int ret;
> +
> +       if (args->pipe >= ETNA_MAX_PIPES)
> +               return -EINVAL;
> +
> +       gpu = priv->gpu[args->pipe];
> +       if (!gpu)
> +               return -ENXIO;
> +
> +       if (args->stream_size % 4) {
> +               DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
> +                         args->stream_size);
> +               return -EINVAL;
> +       }
> +
> +       /*
> +        * Copy the command submission and bo array to kernel space in
> +        * one go, and do this outside of the dev->struct_mutex lock.
> +        */
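> +       /*
> +        * The cmdbuf is sized to the 8-byte aligned stream plus one
> +        * extra 64-bit slot, presumably to leave room for the kernel to
> +        * append a LINK command back to the ring buffer.
> +        */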
> +       bos = drm_malloc_ab(args->nr_bos, sizeof(*bos));
> +       stream = drm_malloc_ab(1, args->stream_size);
> +       cmdbuf = etnaviv_gpu_cmdbuf_new(gpu, ALIGN(args->stream_size, 8) + 8);
> +       if (!bos || !stream || !cmdbuf) {
> +               ret = -ENOMEM;
> +               goto err_submit_cmds;
> +       }
> +
> +       ret = copy_from_user(bos, to_user_ptr(args->bos),
> +                            args->nr_bos * sizeof(*bos));
> +       if (ret) {
> +               ret = -EFAULT;
> +               goto err_submit_cmds;
> +       }
> +
> +       ret = copy_from_user(stream, to_user_ptr(args->stream),
> +                            args->stream_size);
> +       if (ret) {
> +               ret = -EFAULT;
> +               goto err_submit_cmds;
> +       }
> +
> +       /*
> +        * Avoid big circular locking dependency loops:
> +        * - reading debugfs results in mmap_sem depending on i_mutex_key#3
> +        *   (iterate_dir -> filldir64)
> +        * - struct_mutex depends on mmap_sem
> +        *   (vm_mmap_pgoff -> drm_gem_mmap)
> +        * then if we try to do a get_sync() under struct_mutex,
> +        * - genpd->lock depends on struct_mutex
> +        *   (etnaviv_ioctl_gem_submit -> pm_genpd_runtime_resume)
> +        * - (regulator) rdev->mutex depends on genpd->lock
> +        *   (pm_genpd_poweron -> regulator_enable)
> +        * - i_mutex_key#3 depends on rdev->mutex
> +        *   (create_regulator -> debugfs::start_creating)
> +        * and lockdep rightfully explodes.
> +        *
> +        * Avoid this by getting runtime PM outside of the struct_mutex lock.
> +        */
> +       ret = etnaviv_gpu_pm_get_sync(gpu);
> +       if (ret < 0)
> +               goto err_submit_cmds;
> +
> +       mutex_lock(&dev->struct_mutex);
> +
> +       submit = submit_create(dev, gpu, args->nr_bos);
> +       if (!submit) {
> +               ret = -ENOMEM;
> +               goto out;
> +       }
> +       submit->exec_state = args->exec_state;
> +
> +       ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
> +       if (ret)
> +               goto out;
> +
> +       ret = submit_validate_objects(submit);
> +       if (ret)
> +               goto out;
> +
> +       if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4)) {
> +               ret = -EINVAL;
> +               goto out;
> +       }
> +
> +       ret = submit_reloc(submit, stream, args->stream_size / 4,
> +                          args->nr_relocs, args->relocs);
> +       if (ret)
> +               goto out;
> +
> +       memcpy(cmdbuf->vaddr, stream, args->stream_size);
> +       cmdbuf->user_size = ALIGN(args->stream_size, 8);
> +       /* transfer ownership of cmdbuf to submit */
> +       submit->cmdbuf = cmdbuf;
> +       cmdbuf = NULL;
> +
> +       ret = etnaviv_gpu_submit(gpu, submit, ctx);
> +
> +       args->fence = submit->fence;
> +
> +out:
> +       if (submit)
> +               submit_cleanup(submit, !!ret);
> +       mutex_unlock(&dev->struct_mutex);
> +
> +       etnaviv_gpu_pm_put(gpu);
> +
> +       /*
> +        * If we're returning -EAGAIN, it could be due to the userptr code
> +        * wanting to run its workqueue outside of the struct_mutex.
> +        * Flush our workqueue to ensure that it is run in a timely manner.
> +        */
> +       if (ret == -EAGAIN)
> +               flush_workqueue(priv->wq);
> +
> +err_submit_cmds:
> +       /* if we still own the cmdbuf */
> +       if (cmdbuf)
> +               etnaviv_gpu_cmdbuf_free(cmdbuf);
> +       if (stream)
> +               drm_free_large(stream);
> +       if (bos)
> +               drm_free_large(bos);
> +
> +       return ret;
> +}
> diff --git a/drivers/staging/etnaviv/etnaviv_gpu.c b/drivers/staging/etnaviv/etnaviv_gpu.c
> new file mode 100644
> index 000000000000..e12fe3508db2
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_gpu.c
> @@ -0,0 +1,1468 @@
> +/*
> + * Copyright (C) 2015 Etnaviv Project
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/component.h>
> +#include <linux/of_device.h>
> +#include "etnaviv_gpu.h"
> +#include "etnaviv_gem.h"
> +#include "etnaviv_mmu.h"
> +#include "etnaviv_iommu.h"
> +#include "etnaviv_iommu_v2.h"
> +#include "common.xml.h"
> +#include "state.xml.h"
> +#include "state_hi.xml.h"
> +#include "cmdstream.xml.h"
> +
> +static const struct platform_device_id gpu_ids[] = {
> +       { .name = "etnaviv-gpu,2d" },
> +       { },
> +};
> +
> +/*
> + * Driver functions:
> + */
> +
> +int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
> +{
> +       switch (param) {
> +       case ETNAVIV_PARAM_GPU_MODEL:
> +               *value = gpu->identity.model;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_REVISION:
> +               *value = gpu->identity.revision;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_FEATURES_0:
> +               *value = gpu->identity.features;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_FEATURES_1:
> +               *value = gpu->identity.minor_features0;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_FEATURES_2:
> +               *value = gpu->identity.minor_features1;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_FEATURES_3:
> +               *value = gpu->identity.minor_features2;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_FEATURES_4:
> +               *value = gpu->identity.minor_features3;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_STREAM_COUNT:
> +               *value = gpu->identity.stream_count;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_REGISTER_MAX:
> +               *value = gpu->identity.register_max;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_THREAD_COUNT:
> +               *value = gpu->identity.thread_count;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE:
> +               *value = gpu->identity.vertex_cache_size;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT:
> +               *value = gpu->identity.shader_core_count;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_PIXEL_PIPES:
> +               *value = gpu->identity.pixel_pipes;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE:
> +               *value = gpu->identity.vertex_output_buffer_size;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_BUFFER_SIZE:
> +               *value = gpu->identity.buffer_size;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT:
> +               *value = gpu->identity.instruction_count;
> +               break;
> +
> +       case ETNAVIV_PARAM_GPU_NUM_CONSTANTS:
> +               *value = gpu->identity.num_constants;
> +               break;
> +
> +       default:
> +               DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
> +               return -EINVAL;
> +       }
> +
> +       return 0;
> +}
> +
> +static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
> +{
> +       if (gpu->identity.minor_features0 &
> +           chipMinorFeatures0_MORE_MINOR_FEATURES) {
> +               u32 specs[2];
> +
> +               specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
> +               specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
> +
> +               gpu->identity.stream_count =
> +                       (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
> +                               >> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
> +               gpu->identity.register_max =
> +                       (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
> +                               >> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
> +               gpu->identity.thread_count =
> +                       (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
> +                               >> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
> +               gpu->identity.vertex_cache_size =
> +                       (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
> +                               >> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
> +               gpu->identity.shader_core_count =
> +                       (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
> +                               >> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
> +               gpu->identity.pixel_pipes =
> +                       (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
> +                               >> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
> +               gpu->identity.vertex_output_buffer_size =
> +                       (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
> +                               >> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;
> +
> +               gpu->identity.buffer_size =
> +                       (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
> +                               >> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
> +               gpu->identity.instruction_count =
> +                       (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
> +                               >> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
> +               gpu->identity.num_constants =
> +                       (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
> +                               >> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
> +
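> +               /*
> +                * These three fields are stored as log2 values in the
> +                * SPECS registers; convert them to absolute numbers.
> +                */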
> +               gpu->identity.register_max = 1 << gpu->identity.register_max;
> +               gpu->identity.thread_count = 1 << gpu->identity.thread_count;
> +               gpu->identity.vertex_output_buffer_size =
> +                       1 << gpu->identity.vertex_output_buffer_size;
> +       } else {
> +               dev_err(gpu->dev, "TODO: determine GPU specs based on model\n");
> +       }
> +
> +       switch (gpu->identity.instruction_count) {
> +       case 0:
> +               if ((gpu->identity.model == 0x2000 &&
> +                    gpu->identity.revision == 0x5108) ||
> +                   gpu->identity.model == 0x880)
> +                       gpu->identity.instruction_count = 512;
> +               else
> +                       gpu->identity.instruction_count = 256;
> +               break;
> +
> +       case 1:
> +               gpu->identity.instruction_count = 1024;
> +               break;
> +
> +       case 2:
> +               gpu->identity.instruction_count = 2048;
> +               break;
> +
> +       default:
> +               gpu->identity.instruction_count = 256;
> +               break;
> +       }
> +}
> +
> +static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
> +{
> +       u32 chipIdentity;
> +
> +       chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
> +
> +       /* Special case for older graphics cores. */
> +       if (VIVS_HI_CHIP_IDENTITY_FAMILY(chipIdentity) == 0x01) {
> +               gpu->identity.model    = 0x500; /* gc500 */
> +               gpu->identity.revision = VIVS_HI_CHIP_IDENTITY_REVISION(chipIdentity);
> +       } else {
> +               gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
> +               gpu->identity.revision = gpu_read(gpu, VIVS_HI_CHIP_REV);
> +
> +               /*
> +                * !!!! HACK ALERT !!!!
> +                * Because people change device IDs without letting software
> +                * know about it - here is the hack to make it all look the
> +                * same.  Only for GC400 family.
> +                */
> +               if ((gpu->identity.model & 0xff00) == 0x0400 &&
> +                   gpu->identity.model != 0x0420) {
> +                       gpu->identity.model = gpu->identity.model & 0x0400;
> +               }
> +
> +               /* Another special case */
> +               if (gpu->identity.model == 0x300 &&
> +                   gpu->identity.revision == 0x2201) {
> +                       u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
> +                       u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
> +
> +                       if (chipDate == 0x20080814 && chipTime == 0x12051100) {
> +                               /*
> +                                * This IP has an ECO; put the correct
> +                                * revision in it.
> +                                */
> +                               gpu->identity.revision = 0x1051;
> +                       }
> +               }
> +       }
> +
> +       dev_info(gpu->dev, "model: GC%x, revision: %x\n",
> +                gpu->identity.model, gpu->identity.revision);
> +
> +       gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
> +
> +       /* Disable fast clear on GC700. */
> +       if (gpu->identity.model == 0x700)
> +               gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
> +
> +       if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
> +           (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {
> +
> +               /*
> +                * GC500 rev 1.x and GC300 rev < 2.0 don't have these
> +                * registers.
> +                */
> +               gpu->identity.minor_features0 = 0;
> +               gpu->identity.minor_features1 = 0;
> +               gpu->identity.minor_features2 = 0;
> +               gpu->identity.minor_features3 = 0;
> +       } else {
> +               gpu->identity.minor_features0 =
> +                               gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
> +       }
> +
> +       if (gpu->identity.minor_features0 &
> +           chipMinorFeatures0_MORE_MINOR_FEATURES) {
> +               gpu->identity.minor_features1 =
> +                               gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_1);
> +               gpu->identity.minor_features2 =
> +                               gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
> +               gpu->identity.minor_features3 =
> +                               gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
> +       }
> +
> +       /* GC600 idle register reports zero bits where modules aren't present */
> +       if (gpu->identity.model == chipModel_GC600) {
> +               gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
> +                                VIVS_HI_IDLE_STATE_RA |
> +                                VIVS_HI_IDLE_STATE_SE |
> +                                VIVS_HI_IDLE_STATE_PA |
> +                                VIVS_HI_IDLE_STATE_SH |
> +                                VIVS_HI_IDLE_STATE_PE |
> +                                VIVS_HI_IDLE_STATE_DE |
> +                                VIVS_HI_IDLE_STATE_FE;
> +       } else {
> +               gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
> +       }
> +
> +       etnaviv_hw_specs(gpu);
> +}
> +
> +static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
> +{
> +       gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock |
> +                 VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD);
> +       gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
> +}
> +
> +static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
> +{
> +       u32 control, idle;
> +       unsigned long timeout;
> +       bool failed = true;
> +
> +       /* TODO
> +        *
> +        * - clock gating
> +        * - pulse eater
> +        * - what about VG?
> +        */
> +
> +       /* We hope that the GPU resets in under one second */
> +       timeout = jiffies + msecs_to_jiffies(1000);
> +
> +       while (time_is_after_jiffies(timeout)) {
> +               control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
> +                         VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
> +
> +               /* enable clock */
> +               etnaviv_gpu_load_clock(gpu, control);
> +
> +               /* Wait for stable clock.  Vivante's code waited for 1ms */
> +               usleep_range(1000, 10000);
> +
> +               /* isolate the GPU. */
> +               control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
> +               gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
> +
> +               /* set soft reset. */
> +               control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
> +               gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
> +
> +               /* wait for reset. */
> +               msleep(1);
> +
> +               /* reset soft reset bit. */
> +               control &= ~VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
> +               gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
> +
> +               /* reset GPU isolation. */
> +               control &= ~VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
> +               gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
> +
> +               /* read idle register. */
> +               idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
> +
> +               /* try resetting again if the FE is not idle */
> +               if ((idle & VIVS_HI_IDLE_STATE_FE) == 0) {
> +                       dev_dbg(gpu->dev, "FE is not idle\n");
> +                       continue;
> +               }
> +
> +               /* read reset register. */
> +               control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
> +
> +               /* is the GPU idle? */
> +               if (((control & VIVS_HI_CLOCK_CONTROL_IDLE_3D) == 0) ||
> +                   ((control & VIVS_HI_CLOCK_CONTROL_IDLE_2D) == 0)) {
> +                       dev_dbg(gpu->dev, "GPU is not idle\n");
> +                       continue;
> +               }
> +
> +               failed = false;
> +               break;
> +       }
> +
> +       if (failed) {
> +               idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
> +               control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
> +
> +               dev_err(gpu->dev, "GPU failed to reset: FE %sidle, 3D %sidle, 2D %sidle\n",
> +                       idle & VIVS_HI_IDLE_STATE_FE ? "" : "not ",
> +                       control & VIVS_HI_CLOCK_CONTROL_IDLE_3D ? "" : "not ",
> +                       control & VIVS_HI_CLOCK_CONTROL_IDLE_2D ? "" : "not ");
> +
> +               return -EBUSY;
> +       }
> +
> +       /* We rely on the GPU running, so program the clock */
> +       control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
> +                 VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
> +
> +       /* enable clock */
> +       etnaviv_gpu_load_clock(gpu, control);
> +
> +       return 0;
> +}
> +
> +static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
> +{
> +       u16 prefetch;
> +
> +       if (gpu->identity.model == chipModel_GC320 &&
> +           gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
> +           (gpu->identity.revision == 0x5007 ||
> +            gpu->identity.revision == 0x5220)) {
> +               u32 mc_memory_debug;
> +
> +               mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
> +
> +               if (gpu->identity.revision == 0x5007)
> +                       mc_memory_debug |= 0x0c;
> +               else
> +                       mc_memory_debug |= 0x08;
> +
> +               gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
> +       }
> +
> +       /*
> +        * Update GPU AXI cache attribute to "cacheable, no allocate".
> +        * This is necessary to prevent the iMX6 SoC locking up.
> +        */
> +       gpu_write(gpu, VIVS_HI_AXI_CONFIG,
> +                 VIVS_HI_AXI_CONFIG_AWCACHE(2) |
> +                 VIVS_HI_AXI_CONFIG_ARCACHE(2));
> +
> +       /* GC2000 rev 5108 needs a special bus config */
> +       if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) {
> +               u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
> +
> +               bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
> +                               VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
> +               bus_config |= VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(1) |
> +                             VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(0);
> +               gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
> +       }
> +
> +       /* set base addresses */
> +       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
> +       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
> +       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
> +       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
> +       gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
> +
> +       /* setup the MMU page table pointers */
> +       etnaviv_iommu_domain_restore(gpu, gpu->mmu->domain);
> +
> +       /* Start command processor */
> +       prefetch = etnaviv_buffer_init(gpu);
> +
> +       gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
> +       gpu_write(gpu, VIVS_FE_COMMAND_ADDRESS,
> +                 gpu->buffer->paddr - gpu->memory_base);
> +       gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
> +                 VIVS_FE_COMMAND_CONTROL_ENABLE |
> +                 VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
> +}
> +
> +int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
> +{
> +       int ret, i;
> +       struct iommu_domain *iommu;
> +       enum etnaviv_iommu_version version;
> +       bool mmuv2;
> +
> +       ret = pm_runtime_get_sync(gpu->dev);
> +       if (ret < 0)
> +               return ret;
> +
> +       etnaviv_hw_identify(gpu);
> +
> +       if (gpu->identity.model == 0) {
> +               dev_err(gpu->dev, "Unknown GPU model\n");
> +               pm_runtime_put_autosuspend(gpu->dev);
> +               return -ENXIO;
> +       }
> +
> +       ret = etnaviv_hw_reset(gpu);
> +       if (ret)
> +               goto fail;
> +
> +       /* Setup IOMMU.. eventually we will (I think) do this once per context
> +        * and have separate page tables per context.  For now, to keep things
> +        * simple and to get something working, just use a single address space:
> +        */
> +       mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
> +       dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);
> +
> +       if (!mmuv2) {
> +               iommu = etnaviv_iommu_domain_alloc(gpu);
> +               version = ETNAVIV_IOMMU_V1;
> +       } else {
> +               iommu = etnaviv_iommu_v2_domain_alloc(gpu);
> +               version = ETNAVIV_IOMMU_V2;
> +       }
> +
> +       if (!iommu) {
> +               ret = -ENOMEM;
> +               goto fail;
> +       }
> +
> +       /* TODO: we leak memory here - fix it! */
> +
> +       gpu->mmu = etnaviv_iommu_new(gpu->dev, iommu, version);
> +       if (!gpu->mmu) {
> +               ret = -ENOMEM;
> +               goto fail;
> +       }
> +
> +       /* Create buffer: */
> +       gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE);
> +       if (!gpu->buffer) {
> +               ret = -ENOMEM;
> +               dev_err(gpu->dev, "could not create command buffer\n");
> +               goto fail;
> +       }
> +
> +       /* Setup event management */
> +       spin_lock_init(&gpu->event_spinlock);
> +       init_completion(&gpu->event_free);
> +       for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
> +               gpu->event[i].used = false;
> +               complete(&gpu->event_free);
> +       }
> +
> +       /* Now program the hardware */
> +       mutex_lock(&gpu->drm->struct_mutex);
> +       etnaviv_gpu_hw_init(gpu);
> +       mutex_unlock(&gpu->drm->struct_mutex);
> +
> +       pm_runtime_mark_last_busy(gpu->dev);
> +       pm_runtime_put_autosuspend(gpu->dev);
> +
> +       return 0;
> +
> +fail:
> +       pm_runtime_mark_last_busy(gpu->dev);
> +       pm_runtime_put_autosuspend(gpu->dev);
> +
> +       return ret;
> +}
> +
> +#ifdef CONFIG_DEBUG_FS
> +struct dma_debug {
> +       u32 address[2];
> +       u32 state[2];
> +};
> +
> +static void verify_dma(struct etnaviv_gpu *gpu, struct dma_debug *debug)
> +{
> +       u32 i;
> +
> +       debug->address[0] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
> +       debug->state[0]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
> +
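> +       /*
> +        * Sample the FE DMA registers a number of times; if neither the
> +        * address nor the state changes across all reads, the front end
> +        * is most likely stuck.
> +        */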
> +       for (i = 0; i < 500; i++) {
> +               debug->address[1] = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
> +               debug->state[1]   = gpu_read(gpu, VIVS_FE_DMA_DEBUG_STATE);
> +
> +               if (debug->address[0] != debug->address[1])
> +                       break;
> +
> +               if (debug->state[0] != debug->state[1])
> +                       break;
> +       }
> +}
> +
> +int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
> +{
> +       struct dma_debug debug;
> +       u32 dma_lo, dma_hi, axi, idle;
> +       int ret;
> +
> +       seq_printf(m, "%s Status:\n", dev_name(gpu->dev));
> +
> +       ret = pm_runtime_get_sync(gpu->dev);
> +       if (ret < 0)
> +               return ret;
> +
> +       ret = mutex_lock_interruptible(&gpu->drm->struct_mutex);
> +       if (ret < 0)
> +               goto err_rpm;
> +
> +       dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
> +       dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
> +       axi = gpu_read(gpu, VIVS_HI_AXI_STATUS);
> +       idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
> +
> +       verify_dma(gpu, &debug);
> +
> +       seq_puts(m, "\tfeatures\n");
> +       seq_printf(m, "\t minor_features0: 0x%08x\n",
> +                  gpu->identity.minor_features0);
> +       seq_printf(m, "\t minor_features1: 0x%08x\n",
> +                  gpu->identity.minor_features1);
> +       seq_printf(m, "\t minor_features2: 0x%08x\n",
> +                  gpu->identity.minor_features2);
> +       seq_printf(m, "\t minor_features3: 0x%08x\n",
> +                  gpu->identity.minor_features3);
> +
> +       seq_puts(m, "\tspecs\n");
> +       seq_printf(m, "\t stream_count:  %d\n",
> +                       gpu->identity.stream_count);
> +       seq_printf(m, "\t register_max: %d\n",
> +                       gpu->identity.register_max);
> +       seq_printf(m, "\t thread_count: %d\n",
> +                       gpu->identity.thread_count);
> +       seq_printf(m, "\t vertex_cache_size: %d\n",
> +                       gpu->identity.vertex_cache_size);
> +       seq_printf(m, "\t shader_core_count: %d\n",
> +                       gpu->identity.shader_core_count);
> +       seq_printf(m, "\t pixel_pipes: %d\n",
> +                       gpu->identity.pixel_pipes);
> +       seq_printf(m, "\t vertex_output_buffer_size: %d\n",
> +                       gpu->identity.vertex_output_buffer_size);
> +       seq_printf(m, "\t buffer_size: %d\n",
> +                       gpu->identity.buffer_size);
> +       seq_printf(m, "\t instruction_count: %d\n",
> +                       gpu->identity.instruction_count);
> +       seq_printf(m, "\t num_constants: %d\n",
> +                       gpu->identity.num_constants);
> +
> +       seq_printf(m, "\taxi: 0x%08x\n", axi);
> +       seq_printf(m, "\tidle: 0x%08x\n", idle);
> +       idle |= ~gpu->idle_mask & ~VIVS_HI_IDLE_STATE_AXI_LP;
> +       if ((idle & VIVS_HI_IDLE_STATE_FE) == 0)
> +               seq_puts(m, "\t FE is not idle\n");
> +       if ((idle & VIVS_HI_IDLE_STATE_DE) == 0)
> +               seq_puts(m, "\t DE is not idle\n");
> +       if ((idle & VIVS_HI_IDLE_STATE_PE) == 0)
> +               seq_puts(m, "\t PE is not idle\n");
> +       if ((idle & VIVS_HI_IDLE_STATE_SH) == 0)
> +               seq_puts(m, "\t SH is not idle\n");
> +       if ((idle & VIVS_HI_IDLE_STATE_PA) == 0)
> +               seq_puts(m, "\t PA is not idle\n");
> +       if ((idle & VIVS_HI_IDLE_STATE_SE) == 0)
> +               seq_puts(m, "\t SE is not idle\n");
> +       if ((idle & VIVS_HI_IDLE_STATE_RA) == 0)
> +               seq_puts(m, "\t RA is not idle\n");
> +       if ((idle & VIVS_HI_IDLE_STATE_TX) == 0)
> +               seq_puts(m, "\t TX is not idle\n");
> +       if ((idle & VIVS_HI_IDLE_STATE_VG) == 0)
> +               seq_puts(m, "\t VG is not idle\n");
> +       if ((idle & VIVS_HI_IDLE_STATE_IM) == 0)
> +               seq_puts(m, "\t IM is not idle\n");
> +       if ((idle & VIVS_HI_IDLE_STATE_FP) == 0)
> +               seq_puts(m, "\t FP is not idle\n");
> +       if ((idle & VIVS_HI_IDLE_STATE_TS) == 0)
> +               seq_puts(m, "\t TS is not idle\n");
> +       if (idle & VIVS_HI_IDLE_STATE_AXI_LP)
> +               seq_puts(m, "\t AXI low power mode\n");
> +
> +       if (gpu->identity.features & chipFeatures_DEBUG_MODE) {
> +               u32 read0 = gpu_read(gpu, VIVS_MC_DEBUG_READ0);
> +               u32 read1 = gpu_read(gpu, VIVS_MC_DEBUG_READ1);
> +               u32 write = gpu_read(gpu, VIVS_MC_DEBUG_WRITE);
> +
> +               seq_puts(m, "\tMC\n");
> +               seq_printf(m, "\t read0: 0x%08x\n", read0);
> +               seq_printf(m, "\t read1: 0x%08x\n", read1);
> +               seq_printf(m, "\t write: 0x%08x\n", write);
> +       }
> +
> +       seq_puts(m, "\tDMA ");
> +
> +       if (debug.address[0] == debug.address[1] &&
> +           debug.state[0] == debug.state[1]) {
> +               seq_puts(m, "seems to be stuck\n");
> +       } else if (debug.address[0] == debug.address[1]) {
> +               seq_puts(m, "adress is constant\n");
> +       } else {
> +               seq_puts(m, "is runing\n");
> +       }
> +
> +       seq_printf(m, "\t address 0: 0x%08x\n", debug.address[0]);
> +       seq_printf(m, "\t address 1: 0x%08x\n", debug.address[1]);
> +       seq_printf(m, "\t state 0: 0x%08x\n", debug.state[0]);
> +       seq_printf(m, "\t state 1: 0x%08x\n", debug.state[1]);
> +       seq_printf(m, "\t last fetch 64 bit word: 0x%08x 0x%08x\n",
> +                  dma_lo, dma_hi);
> +
> +       ret = 0;
> +
> +       mutex_unlock(&gpu->drm->struct_mutex);
> +
> +err_rpm:
> +       pm_runtime_mark_last_busy(gpu->dev);
> +       pm_runtime_put_autosuspend(gpu->dev);
> +
> +       return ret;
> +}
> +#endif
> +
> +/*
> + * Power Management:
> + */
> +static int enable_clk(struct etnaviv_gpu *gpu)
> +{
> +       if (gpu->clk_core)
> +               clk_prepare_enable(gpu->clk_core);
> +       if (gpu->clk_shader)
> +               clk_prepare_enable(gpu->clk_shader);
> +
> +       return 0;
> +}
> +
> +static int disable_clk(struct etnaviv_gpu *gpu)
> +{
> +       if (gpu->clk_core)
> +               clk_disable_unprepare(gpu->clk_core);
> +       if (gpu->clk_shader)
> +               clk_disable_unprepare(gpu->clk_shader);
> +
> +       return 0;
> +}
> +
> +static int enable_axi(struct etnaviv_gpu *gpu)
> +{
> +       if (gpu->clk_bus)
> +               clk_prepare_enable(gpu->clk_bus);
> +
> +       return 0;
> +}
> +
> +static int disable_axi(struct etnaviv_gpu *gpu)
> +{
> +       if (gpu->clk_bus)
> +               clk_disable_unprepare(gpu->clk_bus);
> +
> +       return 0;
> +}
> +
> +/*
> + * Hangcheck detection for locked gpu:
> + */
> +static void recover_worker(struct work_struct *work)
> +{
> +       struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
> +                                              recover_work);
> +       struct drm_device *dev = gpu->drm;
> +       unsigned long flags;
> +       unsigned int i;
> +
> +       dev_err(gpu->dev, "hangcheck recover!\n");
> +
> +       if (pm_runtime_get_sync(gpu->dev) < 0)
> +               return;
> +
> +       mutex_lock(&dev->struct_mutex);
> +
> +       etnaviv_hw_reset(gpu);
> +
> +       /* complete all events, the GPU won't do it after the reset */
> +       spin_lock_irqsave(&gpu->event_spinlock, flags);
> +       for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
> +               if (!gpu->event[i].used)
> +                       continue;
> +               gpu->event[i].used = false;
> +               complete(&gpu->event_free);
> +               /*
> +                * Decrement the PM count for each stuck event. This is safe
> +                * even in atomic context as we use ASYNC RPM here.
> +                */
> +               pm_runtime_put_autosuspend(gpu->dev);
> +       }
> +       spin_unlock_irqrestore(&gpu->event_spinlock, flags);
> +       gpu->completed_fence = gpu->submitted_fence;
> +
> +       etnaviv_gpu_hw_init(gpu);
> +       gpu->switch_context = true;
> +
> +       mutex_unlock(&dev->struct_mutex);
> +       pm_runtime_mark_last_busy(gpu->dev);
> +       pm_runtime_put_autosuspend(gpu->dev);
> +
> +       /* Retire the buffer objects from a worker */
> +       etnaviv_queue_work(gpu->drm, &gpu->retire_work);
> +}
> +
> +static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
> +{
> +       DBG("%s", dev_name(gpu->dev));
> +       mod_timer(&gpu->hangcheck_timer,
> +                 round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
> +}
> +
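> +/*
> + * Periodic timer callback: the GPU counts as making progress if either
> + * the completed fence advanced or the FE DMA address moved since the
> + * last check.  Only if neither happened while work is still outstanding
> + * do we schedule the recovery worker.
> + */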
> +static void hangcheck_handler(unsigned long data)
> +{
> +       struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data;
> +       u32 fence = gpu->completed_fence;
> +       bool progress = false;
> +
> +       if (fence != gpu->hangcheck_fence) {
> +               gpu->hangcheck_fence = fence;
> +               progress = true;
> +       }
> +
> +       if (!progress) {
> +               u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
> +               int change = dma_addr - gpu->hangcheck_dma_addr;
> +
> +               if (change < 0 || change > 16) {
> +                       gpu->hangcheck_dma_addr = dma_addr;
> +                       progress = true;
> +               }
> +       }
> +
> +       if (!progress && fence_after(gpu->submitted_fence, fence)) {
> +               dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
> +               dev_err(gpu->dev, "     completed fence: %u\n", fence);
> +               dev_err(gpu->dev, "     submitted fence: %u\n",
> +                       gpu->submitted_fence);
> +               etnaviv_queue_work(gpu->drm, &gpu->recover_work);
> +       }
> +
> +       /* if still more pending work, reset the hangcheck timer: */
> +       if (fence_after(gpu->submitted_fence, gpu->hangcheck_fence))
> +               hangcheck_timer_reset(gpu);
> +}
> +
> +static void hangcheck_disable(struct etnaviv_gpu *gpu)
> +{
> +       del_timer_sync(&gpu->hangcheck_timer);
> +       cancel_work_sync(&gpu->recover_work);
> +}
> +
> +/*
> + * event management:
> + */
> +
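> +/*
> + * The event_free completion acts as a counting semaphore: it is
> + * completed once per event slot at init time, event_alloc() waits on it
> + * before claiming a slot under the spinlock, and event_free() completes
> + * it again when a slot is returned.
> + */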
> +static unsigned int event_alloc(struct etnaviv_gpu *gpu)
> +{
> +       unsigned long ret, flags;
> +       unsigned int i, event = ~0U;
> +
> +       ret = wait_for_completion_timeout(&gpu->event_free,
> +                                         msecs_to_jiffies(10 * 10000));
> +       if (!ret)
> +               dev_err(gpu->dev, "wait_for_completion_timeout failed");
> +
> +       spin_lock_irqsave(&gpu->event_spinlock, flags);
> +
> +       /* find first free event */
> +       for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
> +               if (!gpu->event[i].used) {
> +                       gpu->event[i].used = true;
> +                       event = i;
> +                       break;
> +               }
> +       }
> +
> +       spin_unlock_irqrestore(&gpu->event_spinlock, flags);
> +
> +       return event;
> +}
> +
> +static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
> +{
> +       unsigned long flags;
> +
> +       spin_lock_irqsave(&gpu->event_spinlock, flags);
> +
> +       if (!gpu->event[event].used) {
> +               dev_warn(gpu->dev, "event %u is already marked as free\n",
> +                        event);
> +               spin_unlock_irqrestore(&gpu->event_spinlock, flags);
> +       } else {
> +               gpu->event[event].used = false;
> +               spin_unlock_irqrestore(&gpu->event_spinlock, flags);
> +
> +               complete(&gpu->event_free);
> +       }
> +}
> +
> +/*
> + * Cmdstream submission/retirement:
> + */
> +
> +struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu, u32 size)
> +{
> +       struct etnaviv_cmdbuf *cmdbuf;
> +
> +       cmdbuf = kzalloc(sizeof(*cmdbuf), GFP_KERNEL);
> +       if (!cmdbuf)
> +               return NULL;
> +
> +       cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
> +                                              GFP_KERNEL);
> +       if (!cmdbuf->vaddr) {
> +               kfree(cmdbuf);
> +               return NULL;
> +       }
> +
> +       cmdbuf->gpu = gpu;
> +       cmdbuf->size = size;
> +
> +       return cmdbuf;
> +}
> +
> +void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
> +{
> +       dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
> +                             cmdbuf->vaddr, cmdbuf->paddr);
> +       kfree(cmdbuf);
> +}
> +
> +static void retire_worker(struct work_struct *work)
> +{
> +       struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
> +                                              retire_work);
> +       struct drm_device *dev = gpu->drm;
> +       u32 fence = gpu->completed_fence;
> +       struct etnaviv_cmdbuf *cmdbuf, *tmp;
> +
> +       mutex_lock(&dev->struct_mutex);
> +
> +       while (!list_empty(&gpu->active_list)) {
> +               struct etnaviv_gem_object *obj;
> +
> +               obj = list_first_entry(&gpu->active_list,
> +                               struct etnaviv_gem_object, mm_list);
> +
> +               if ((!(obj->access & ETNA_SUBMIT_BO_READ) ||
> +                    fence_after_eq(fence, obj->read_fence)) &&
> +                   (!(obj->access & ETNA_SUBMIT_BO_WRITE) ||
> +                    fence_after_eq(fence, obj->write_fence))) {
> +                       /* move to inactive: */
> +                       etnaviv_gem_move_to_inactive(&obj->base);
> +                       etnaviv_gem_put_iova(&obj->base);
> +                       drm_gem_object_unreference(&obj->base);
> +               } else {
> +                       break;
> +               }
> +       }
> +
> +       list_for_each_entry_safe(cmdbuf, tmp, &gpu->active_cmd_list,
> +                                gpu_active_list) {
> +               if (fence_after_eq(fence, cmdbuf->fence)) {
> +                       etnaviv_gpu_cmdbuf_free(cmdbuf);
> +                       list_del(&cmdbuf->gpu_active_list);
> +               }
> +       }
> +
> +       gpu->retired_fence = fence;
> +
> +       mutex_unlock(&dev->struct_mutex);
> +
> +       wake_up_all(&gpu->fence_event);
> +}
> +
> +static unsigned long etnaviv_timeout_to_jiffies(struct timespec *timeout)
> +{
> +       unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
> +       unsigned long start_jiffies = jiffies;
> +       unsigned long remaining_jiffies;
> +
> +       if (time_after(start_jiffies, timeout_jiffies))
> +               remaining_jiffies = 0;
> +       else
> +               remaining_jiffies = timeout_jiffies - start_jiffies;
> +
> +       return remaining_jiffies;
> +}
> +
> +int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
> +       u32 fence, struct timespec *timeout)
> +{
> +       int ret;
> +
> +       if (fence_after(fence, gpu->submitted_fence)) {
> +               DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
> +                               fence, gpu->submitted_fence);
> +               return -EINVAL;
> +       }
> +
> +       if (!timeout) {
> +               /* No timeout was requested: just test for completion */
> +               ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
> +       } else {
> +               unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
> +
> +               ret = wait_event_interruptible_timeout(gpu->fence_event,
> +                                               fence_completed(gpu, fence),
> +                                               remaining);
> +               if (ret == 0) {
> +                       DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
> +                               fence, gpu->retired_fence,
> +                               gpu->completed_fence);
> +                       ret = -ETIMEDOUT;
> +               } else if (ret != -ERESTARTSYS) {
> +                       ret = 0;
> +               }
> +       }
> +
> +       return ret;
> +}
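
For reference, a caller would convert a userspace timeout into a timespec
and funnel the return value straight back; a minimal sketch (hypothetical
caller, not part of this patch):

	struct timespec timeout = { .tv_sec = 1, .tv_nsec = 0 };
	int ret;

	ret = etnaviv_gpu_wait_fence_interruptible(gpu, fence, &timeout);
	if (ret == -ETIMEDOUT)
		;	/* fence not signalled within the timeout */
	else if (ret == -ERESTARTSYS)
		;	/* interrupted by a signal; restart the syscall */
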
> +
> +/*
> + * Wait for an object to become inactive.  This, on its own, is not race
> + * free: the object is moved by the retire worker off the active list, and
> + * then the iova is put.  Moreover, the object could be re-submitted just
> + * after we notice that it's become inactive.
> + *
> + * Although the retirement happens under the struct_mutex, we don't want
> + * to hold that lock in this function.  Instead, the caller is responsible
> + * for ensuring that the retire worker has finished (which will happen, eg,
> + * when we unreference the object, an action which takes the struct_mutex.)
> + */
> +int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
> +       struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout)
> +{
> +       unsigned long remaining;
> +       long ret;
> +
> +       if (!timeout)
> +               return !is_active(etnaviv_obj) ? 0 : -EBUSY;
> +
> +       remaining = etnaviv_timeout_to_jiffies(timeout);
> +
> +       ret = wait_event_interruptible_timeout(gpu->fence_event,
> +                                              !is_active(etnaviv_obj),
> +                                              remaining);
> +       if (ret > 0)
> +               return 0;
> +       else if (ret == -ERESTARTSYS)
> +               return -ERESTARTSYS;
> +       else
> +               return -ETIMEDOUT;
> +}
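
Given the race described above, the safe calling pattern is to hold a
reference across the wait and only drop it afterwards, since the final
unreference takes the struct_mutex and thereby serializes against the
retire worker.  A sketch (hypothetical caller):

	drm_gem_object_reference(&etnaviv_obj->base);
	ret = etnaviv_gpu_wait_obj_inactive(gpu, etnaviv_obj, &timeout);
	/* dropping the reference takes struct_mutex, flushing retirement */
	drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
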
> +
> +int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu)
> +{
> +       return pm_runtime_get_sync(gpu->dev);
> +}
> +
> +void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu)
> +{
> +       pm_runtime_mark_last_busy(gpu->dev);
> +       pm_runtime_put_autosuspend(gpu->dev);
> +}
> +
> +/* add bo's to gpu's ring, and kick gpu: */
> +int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
> +       struct etnaviv_gem_submit *submit, struct etnaviv_file_private *ctx)
> +{
> +       struct drm_device *dev = gpu->drm;
> +       struct etnaviv_drm_private *priv = dev->dev_private;
> +       unsigned int event, i;
> +       int ret;
> +
> +       ret = pm_runtime_get_sync(gpu->dev);
> +       if (ret < 0)
> +               return ret;
> +
> +       /*
> +        * TODO
> +        *
> +        * - flush
> +        * - data endian
> +        * - prefetch
> +        *
> +        */
> +
> +       event = event_alloc(gpu);
> +       if (unlikely(event == ~0U)) {
> +               DRM_ERROR("no free event\n");
> +               pm_runtime_put_autosuspend(gpu->dev);
> +               return -EBUSY;
> +       }
> +
> +       submit->fence = ++priv->next_fence;
> +
> +       gpu->submitted_fence = submit->fence;
> +
> +       if (gpu->lastctx != ctx) {
> +               gpu->mmu->need_flush = true;
> +               gpu->switch_context = true;
> +               gpu->lastctx = ctx;
> +       }
> +
> +       etnaviv_buffer_queue(gpu, event, submit);
> +
> +       for (i = 0; i < submit->nr_bos; i++) {
> +               struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
> +
> +               /* can't happen yet, but when we add 2d support we'll have
> +                * to deal w/ cross-ring synchronization:
> +                */
> +               WARN_ON(is_active(etnaviv_obj) && (etnaviv_obj->gpu != gpu));
> +
> +               if (!is_active(etnaviv_obj)) {
> +                       u32 iova;
> +
> +                       /* ring takes a reference to the bo and iova: */
> +                       drm_gem_object_reference(&etnaviv_obj->base);
> +                       etnaviv_gem_get_iova_locked(gpu, &etnaviv_obj->base,
> +                                                   &iova);
> +               }
> +
> +               if (submit->bos[i].flags & (ETNA_SUBMIT_BO_READ |
> +                                           ETNA_SUBMIT_BO_WRITE))
> +                       etnaviv_gem_move_to_active(&etnaviv_obj->base, gpu,
> +                                                  submit->bos[i].flags,
> +                                                  submit->fence);
> +       }
> +       hangcheck_timer_reset(gpu);
> +
> +       return 0;
> +}
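
To trace one fence through this file: a submit takes the next value from
++priv->next_fence (presumably serialized by the struct_mutex held on the
submit path), records it in gpu->submitted_fence, and stamps it on every
READ/WRITE BO via etnaviv_gem_move_to_active().  The matching event IRQ
then advances gpu->completed_fence, and retire_worker() above moves every
BO whose read/write fences are <= completed_fence back to the inactive
list and frees command buffers up to that fence.
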
> +
> +/*
> + * Init/Cleanup:
> + */
> +static irqreturn_t irq_handler(int irq, void *data)
> +{
> +       struct etnaviv_gpu *gpu = data;
> +       irqreturn_t ret = IRQ_NONE;
> +
> +       u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);
> +
> +       if (intr != 0) {
> +               int event;
> +
> +               pm_runtime_mark_last_busy(gpu->dev);
> +
> +               dev_dbg(gpu->dev, "intr 0x%08x\n", intr);
> +
> +               if (intr & VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR) {
> +                       dev_err(gpu->dev, "AXI bus error\n");
> +                       intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR;
> +               }
> +
> +               while ((event = ffs(intr)) != 0) {
> +                       event -= 1;
> +
> +                       intr &= ~(1 << event);
> +
> +                       dev_dbg(gpu->dev, "event %u\n", event);
> +                       /*
> +                        * Events can be processed out of order.  Eg,
> +                        * - allocate and queue event 0
> +                        * - allocate event 1
> +                        * - event 0 completes, we process it
> +                        * - allocate and queue event 0
> +                        * - event 1 and event 0 complete
> +                        * we can end up processing event 0 first, then 1.
> +                        */
> +                       if (fence_after(gpu->event[event].fence,
> +                                       gpu->completed_fence))
> +                               gpu->completed_fence = gpu->event[event].fence;
> +                       event_free(gpu, event);
> +
> +                       /*
> +                        * We need to balance the runtime PM count caused by
> +                        * each submission.  Upon submission, we increment
> +                        * the runtime PM counter, and allocate one event.
> +                        * So here, we put the runtime PM count for each
> +                        * completed event.
> +                        */
> +                       pm_runtime_put_autosuspend(gpu->dev);
> +               }
> +
> +               /* Retire the buffer objects in a work */
> +               etnaviv_queue_work(gpu->drm, &gpu->retire_work);
> +
> +               ret = IRQ_HANDLED;
> +       }
> +
> +       return ret;
> +}
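
The runtime PM accounting pairs up as follows: each etnaviv_gpu_submit()
takes one reference (pm_runtime_get_sync) and allocates one event, and
each completed event releases one reference (pm_runtime_put_autosuspend)
in this handler, so once all outstanding events have fired the usage
count is back at its pre-submit value and autosuspend can kick in.
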
> +
> +static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
> +{
> +       int ret;
> +
> +       ret = enable_clk(gpu);
> +       if (ret)
> +               return ret;
> +
> +       ret = enable_axi(gpu);
> +       if (ret) {
> +               disable_clk(gpu);
> +               return ret;
> +       }
> +
> +       return 0;
> +}
> +
> +static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
> +{
> +       int ret;
> +
> +       ret = disable_axi(gpu);
> +       if (ret)
> +               return ret;
> +
> +       ret = disable_clk(gpu);
> +       if (ret)
> +               return ret;
> +
> +       return 0;
> +}
> +
> +static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
> +{
> +       if (gpu->buffer) {
> +               unsigned long timeout;
> +
> +               /* Replace the last WAIT with END */
> +               etnaviv_buffer_end(gpu);
> +
> +               /*
> +                * We know that only the FE is busy here, this should
> +                * happen quickly (as the WAIT is only 200 cycles).  If
> +                * we fail, just warn and continue.
> +                */
> +               timeout = jiffies + msecs_to_jiffies(100);
> +               do {
> +                       u32 idle = gpu_read(gpu, VIVS_HI_IDLE_STATE);
> +
> +                       if ((idle & gpu->idle_mask) == gpu->idle_mask)
> +                               break;
> +
> +                       if (time_is_before_jiffies(timeout)) {
> +                               dev_warn(gpu->dev,
> +                                        "timed out waiting for idle: idle=0x%x\n",
> +                                        idle);
> +                               break;
> +                       }
> +
> +                       udelay(5);
> +               } while (1);
> +       }
> +
> +       return etnaviv_gpu_clk_disable(gpu);
> +}
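
If linux/iopoll.h is available in this tree (an assumption), the poll
loop above could be written more compactly, modulo the etnaviv_readl()
tracing wrapper that gpu_read() goes through:

	u32 idle;

	if (readl_poll_timeout(gpu->mmio + VIVS_HI_IDLE_STATE, idle,
			       (idle & gpu->idle_mask) == gpu->idle_mask,
			       5, 100 * USEC_PER_MSEC))
		dev_warn(gpu->dev, "timed out waiting for idle: idle=0x%x\n",
			 idle);
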
> +
> +#ifdef CONFIG_PM
> +static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
> +{
> +       struct drm_device *drm = gpu->drm;
> +       u32 clock;
> +       int ret;
> +
> +       ret = mutex_lock_killable(&drm->struct_mutex);
> +       if (ret)
> +               return ret;
> +
> +       clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
> +               VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
> +
> +       etnaviv_gpu_load_clock(gpu, clock);
> +       etnaviv_gpu_hw_init(gpu);
> +
> +       gpu->switch_context = true;
> +
> +       mutex_unlock(&drm->struct_mutex);
> +
> +       return 0;
> +}
> +#endif
> +
> +static int etnaviv_gpu_bind(struct device *dev, struct device *master,
> +       void *data)
> +{
> +       struct drm_device *drm = data;
> +       struct etnaviv_drm_private *priv = drm->dev_private;
> +       struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
> +       int ret;
> +
> +#ifdef CONFIG_PM
> +       ret = pm_runtime_get_sync(gpu->dev);
> +#else
> +       ret = etnaviv_gpu_clk_enable(gpu);
> +#endif
> +       if (ret < 0)
> +               return ret;
> +
> +       gpu->drm = drm;
> +
> +       INIT_LIST_HEAD(&gpu->active_list);
> +       INIT_LIST_HEAD(&gpu->active_cmd_list);
> +       INIT_WORK(&gpu->retire_work, retire_worker);
> +       INIT_WORK(&gpu->recover_work, recover_worker);
> +       init_waitqueue_head(&gpu->fence_event);
> +
> +       setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
> +                       (unsigned long)gpu);
> +
> +       priv->gpu[priv->num_gpus++] = gpu;
> +
> +       pm_runtime_mark_last_busy(gpu->dev);
> +       pm_runtime_put_autosuspend(gpu->dev);
> +
> +       return 0;
> +}
> +
> +static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
> +       void *data)
> +{
> +       struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
> +
> +       DBG("%s", dev_name(gpu->dev));
> +
> +       hangcheck_disable(gpu);
> +
> +       WARN_ON(!list_empty(&gpu->active_list));
> +
> +#ifdef CONFIG_PM
> +       pm_runtime_get_sync(gpu->dev);
> +       pm_runtime_put_sync_suspend(gpu->dev);
> +#else
> +       etnaviv_gpu_hw_suspend(gpu);
> +#endif
> +
> +       if (gpu->buffer) {
> +               etnaviv_gpu_cmdbuf_free(gpu->buffer);
> +               gpu->buffer = NULL;
> +       }
> +
> +       if (gpu->mmu) {
> +               etnaviv_iommu_destroy(gpu->mmu);
> +               gpu->mmu = NULL;
> +       }
> +
> +       gpu->drm = NULL;
> +}
> +
> +static const struct component_ops gpu_ops = {
> +       .bind = etnaviv_gpu_bind,
> +       .unbind = etnaviv_gpu_unbind,
> +};
> +
> +static const struct of_device_id etnaviv_gpu_match[] = {
> +       {
> +               .compatible = "vivante,gc"
> +       },
> +       { /* sentinel */ }
> +};
> +
> +static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
> +{
> +       struct device *dev = &pdev->dev;
> +       struct etnaviv_gpu *gpu;
> +       int err = 0;
> +
> +       gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
> +       if (!gpu)
> +               return -ENOMEM;
> +
> +       gpu->dev = &pdev->dev;
> +
> +       /*
> +        * Set the GPU base address to the start of physical memory.  This
> +        * ensures that if we have up to 2GB, the v1 MMU can address the
> +        * highest memory.  This is important as command buffers may be
> +        * allocated outside of this limit.
> +        */
> +       gpu->memory_base = PHYS_OFFSET;
> +
> +       /* Map registers: */
> +       gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
> +       if (IS_ERR(gpu->mmio))
> +               return PTR_ERR(gpu->mmio);
> +
> +       /* Get Interrupt: */
> +       gpu->irq = platform_get_irq(pdev, 0);
> +       if (gpu->irq < 0) {
> +               err = gpu->irq;
> +               dev_err(dev, "failed to get irq: %d\n", err);
> +               goto fail;
> +       }
> +
> +       err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
> +                              dev_name(gpu->dev), gpu);
> +       if (err) {
> +               dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
> +               goto fail;
> +       }
> +
> +       /* Get Clocks: */
> +       gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
> +       DBG("clk_bus: %p", gpu->clk_bus);
> +       if (IS_ERR(gpu->clk_bus))
> +               gpu->clk_bus = NULL;
> +
> +       gpu->clk_core = devm_clk_get(&pdev->dev, "core");
> +       DBG("clk_core: %p", gpu->clk_core);
> +       if (IS_ERR(gpu->clk_core))
> +               gpu->clk_core = NULL;
> +
> +       gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
> +       DBG("clk_shader: %p", gpu->clk_shader);
> +       if (IS_ERR(gpu->clk_shader))
> +               gpu->clk_shader = NULL;
> +
> +       /* TODO: figure out max mapped size */
> +       dev_set_drvdata(dev, gpu);
> +
> +       /*
> +        * We treat the device as initially suspended.  The runtime PM
> +        * autosuspend delay is rather arbitrary: no measurements have
> +        * yet been performed to determine an appropriate value.
> +        */
> +       pm_runtime_use_autosuspend(gpu->dev);
> +       pm_runtime_set_autosuspend_delay(gpu->dev, 200);
> +       pm_runtime_enable(gpu->dev);
> +
> +       err = component_add(&pdev->dev, &gpu_ops);
> +       if (err < 0) {
> +               dev_err(&pdev->dev, "failed to register component: %d\n", err);
> +               goto fail;
> +       }
> +
> +       return 0;
> +
> +fail:
> +       return err;
> +}
> +
> +static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
> +{
> +       component_del(&pdev->dev, &gpu_ops);
> +       pm_runtime_disable(&pdev->dev);
> +       return 0;
> +}
> +
> +#ifdef CONFIG_PM
> +static int etnaviv_gpu_rpm_suspend(struct device *dev)
> +{
> +       struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
> +       u32 idle, mask;
> +
> +       /* If we have outstanding fences, we're not idle */
> +       if (gpu->completed_fence != gpu->submitted_fence)
> +               return -EBUSY;
> +
> +       /* Check whether the hardware (except FE) is idle */
> +       mask = gpu->idle_mask & ~VIVS_HI_IDLE_STATE_FE;
> +       idle = gpu_read(gpu, VIVS_HI_IDLE_STATE) & mask;
> +       if (idle != mask)
> +               return -EBUSY;
> +
> +       return etnaviv_gpu_hw_suspend(gpu);
> +}
> +
> +static int etnaviv_gpu_rpm_resume(struct device *dev)
> +{
> +       struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
> +       int ret;
> +
> +       /* We must never runtime-PM resume holding struct_mutex */
> +       if (gpu->drm && WARN_ON_ONCE(mutex_is_locked(&gpu->drm->struct_mutex)))
> +               return -EDEADLK;
> +
> +       ret = etnaviv_gpu_clk_enable(gpu);
> +       if (ret)
> +               return ret;
> +
> +       /* Re-initialise the basic hardware state */
> +       if (gpu->drm && gpu->buffer) {
> +               ret = etnaviv_gpu_hw_resume(gpu);
> +               if (ret) {
> +                       etnaviv_gpu_clk_disable(gpu);
> +                       return ret;
> +               }
> +       }
> +
> +       return 0;
> +}
> +#endif
> +
> +static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
> +       SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
> +                          NULL)
> +};
> +
> +struct platform_driver etnaviv_gpu_driver = {
> +       .driver = {
> +               .name = "etnaviv-gpu",
> +               .owner = THIS_MODULE,
> +               .pm = &etnaviv_gpu_pm_ops,
> +               .of_match_table = etnaviv_gpu_match,
> +       },
> +       .probe = etnaviv_gpu_platform_probe,
> +       .remove = etnaviv_gpu_platform_remove,
> +       .id_table = gpu_ids,
> +};
> diff --git a/drivers/staging/etnaviv/etnaviv_gpu.h b/drivers/staging/etnaviv/etnaviv_gpu.h
> new file mode 100644
> index 000000000000..3be5b481d8d1
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_gpu.h
> @@ -0,0 +1,198 @@
> +/*
> + * Copyright (C) 2015 Etnaviv Project
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __ETNAVIV_GPU_H__
> +#define __ETNAVIV_GPU_H__
> +
> +#include <linux/clk.h>
> +#include <linux/regulator/consumer.h>
> +
> +#include "etnaviv_drv.h"
> +
> +struct etnaviv_gem_submit;
> +
> +struct etnaviv_chip_identity {
> +       /* Chip model. */
> +       u32 model;
> +
> +       /* Revision value.*/
> +       u32 revision;
> +
> +       /* Supported feature fields. */
> +       u32 features;
> +
> +       /* Supported minor feature fields. */
> +       u32 minor_features0;
> +
> +       /* Supported minor feature 1 fields. */
> +       u32 minor_features1;
> +
> +       /* Supported minor feature 2 fields. */
> +       u32 minor_features2;
> +
> +       /* Supported minor feature 3 fields. */
> +       u32 minor_features3;
> +
> +       /* Number of streams supported. */
> +       u32 stream_count;
> +
> +       /* Total number of temporary registers per thread. */
> +       u32 register_max;
> +
> +       /* Maximum number of threads. */
> +       u32 thread_count;
> +
> +       /* Number of shader cores. */
> +       u32 shader_core_count;
> +
> +       /* Size of the vertex cache. */
> +       u32 vertex_cache_size;
> +
> +       /* Number of entries in the vertex output buffer. */
> +       u32 vertex_output_buffer_size;
> +
> +       /* Number of pixel pipes. */
> +       u32 pixel_pipes;
> +
> +       /* Number of instructions. */
> +       u32 instruction_count;
> +
> +       /* Number of constants. */
> +       u32 num_constants;
> +
> +       /* Buffer size */
> +       u32 buffer_size;
> +};
> +
> +struct etnaviv_event {
> +       bool used;
> +       u32 fence;
> +};
> +
> +struct etnaviv_cmdbuf;
> +
> +struct etnaviv_gpu {
> +       struct drm_device *drm;
> +       struct device *dev;
> +       struct etnaviv_chip_identity identity;
> +       struct etnaviv_file_private *lastctx;
> +       bool switch_context;
> +
> +       /* 'ring'-buffer: */
> +       struct etnaviv_cmdbuf *buffer;
> +
> +       /* bus base address of memory  */
> +       u32 memory_base;
> +
> +       /* event management: */
> +       struct etnaviv_event event[30];
> +       struct completion event_free;
> +       spinlock_t event_spinlock;
> +
> +       /* list of GEM active objects: */
> +       struct list_head active_list;
> +
> +       /* list of currently in-flight command buffers */
> +       struct list_head active_cmd_list;
> +
> +       u32 idle_mask;
> +
> +       /* Fencing support */
> +       u32 submitted_fence;
> +       u32 completed_fence;
> +       u32 retired_fence;
> +       wait_queue_head_t fence_event;
> +
> +       /* worker for handling active-list retiring: */
> +       struct work_struct retire_work;
> +
> +       void __iomem *mmio;
> +       int irq;
> +
> +       struct etnaviv_iommu *mmu;
> +
> +       /* Power Control: */
> +       struct clk *clk_bus;
> +       struct clk *clk_core;
> +       struct clk *clk_shader;
> +
> +       /* Hang Detection: */
> +#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */
> +#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD)
> +       struct timer_list hangcheck_timer;
> +       u32 hangcheck_fence;
> +       u32 hangcheck_dma_addr;
> +       struct work_struct recover_work;
> +};
> +
> +struct etnaviv_cmdbuf {
> +       /* device this cmdbuf is allocated for */
> +       struct etnaviv_gpu *gpu;
> +       /* cmdbuf properties */
> +       void *vaddr;
> +       dma_addr_t paddr;
> +       u32 size;
> +       u32 user_size;
> +       /* fence after which this buffer is to be disposed */
> +       u32 fence;
> +       /* per GPU in-flight list */
> +       struct list_head gpu_active_list;
> +};
> +
> +static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
> +{
> +       etnaviv_writel(data, gpu->mmio + reg);
> +}
> +
> +static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
> +{
> +       return etnaviv_readl(gpu->mmio + reg);
> +}
> +
> +static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
> +{
> +       return fence_after_eq(gpu->completed_fence, fence);
> +}
> +
> +static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence)
> +{
> +       return fence_after_eq(gpu->retired_fence, fence);
> +}
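
These helpers only behave across u32 wrap-around if fence_after() and
fence_after_eq() (defined in etnaviv_drv.h, not visible in this hunk) do
a signed-difference comparison, presumably along the lines of:

	static inline bool fence_after(u32 a, u32 b)
	{
		return (s32)(a - b) > 0;
	}

so that e.g. fence_after(2, 0xfffffffe) is true even though 2 is
numerically smaller.
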
> +
> +int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
> +
> +int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
> +
> +#ifdef CONFIG_DEBUG_FS
> +int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
> +#endif
> +
> +void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
> +int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
> +       u32 fence, struct timespec *timeout);
> +int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
> +       struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
> +int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
> +       struct etnaviv_gem_submit *submit, struct etnaviv_file_private *ctx);
> +struct etnaviv_cmdbuf *etnaviv_gpu_cmdbuf_new(struct etnaviv_gpu *gpu,
> +                                             u32 size);
> +void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf);
> +int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
> +void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
> +
> +extern struct platform_driver etnaviv_gpu_driver;
> +
> +#endif /* __ETNAVIV_GPU_H__ */
> diff --git a/drivers/staging/etnaviv/etnaviv_iommu.c b/drivers/staging/etnaviv/etnaviv_iommu.c
> new file mode 100644
> index 000000000000..9efb7d6092b4
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_iommu.c
> @@ -0,0 +1,221 @@
> +/*
> + * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@xxxxxxxxx>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/iommu.h>
> +#include <linux/platform_device.h>
> +#include <linux/sizes.h>
> +#include <linux/slab.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/bitops.h>
> +
> +#include "etnaviv_gpu.h"
> +#include "etnaviv_iommu.h"
> +#include "state_hi.xml.h"
> +
> +#define PT_SIZE                SZ_2M
> +#define PT_ENTRIES     (PT_SIZE / sizeof(u32))
> +
> +#define GPU_MEM_START  0x80000000
> +
> +struct etnaviv_iommu_domain_pgtable {
> +       u32 *pgtable;
> +       dma_addr_t paddr;
> +};
> +
> +struct etnaviv_iommu_domain {
> +       struct iommu_domain domain;
> +       struct device *dev;
> +       void *bad_page_cpu;
> +       dma_addr_t bad_page_dma;
> +       struct etnaviv_iommu_domain_pgtable pgtable;
> +       spinlock_t map_lock;
> +};
> +
> +static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
> +{
> +       return container_of(domain, struct etnaviv_iommu_domain, domain);
> +}
> +
> +static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
> +                        size_t size)
> +{
> +       pgtable->pgtable = dma_alloc_coherent(NULL, size, &pgtable->paddr, GFP_KERNEL);
> +       if (!pgtable->pgtable)
> +               return -ENOMEM;
> +
> +       return 0;
> +}
> +
> +static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
> +                        size_t size)
> +{
> +       dma_free_coherent(NULL, size, pgtable->pgtable, pgtable->paddr);
> +}
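
Passing a NULL device to dma_alloc_coherent() is fragile: it ignores the
GPU device's DMA mask and any per-device coherent memory.  Since
__etnaviv_iommu_init() below already has etnaviv_domain->dev, one option
(a sketch, not tested) would be to thread the device through:

	static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
				 struct device *dev, size_t size)
	{
		pgtable->pgtable = dma_alloc_coherent(dev, size,
						      &pgtable->paddr,
						      GFP_KERNEL);
		if (!pgtable->pgtable)
			return -ENOMEM;

		return 0;
	}

with pgtable_free() taking the same extra argument.
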
> +
> +static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
> +                          unsigned long iova)
> +{
> +       /* calculate index into page table */
> +       unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
> +       phys_addr_t paddr;
> +
> +       paddr = pgtable->pgtable[index];
> +
> +       return paddr;
> +}
> +
> +static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
> +                         unsigned long iova, phys_addr_t paddr)
> +{
> +       /* calculate index into page table */
> +       unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
> +
> +       pgtable->pgtable[index] = paddr;
> +}
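
Worked example of the index arithmetic: with GPU_MEM_START at 0x80000000
and 4K pages, iova 0x80001000 lands at index (0x80001000 - 0x80000000) /
SZ_4K = 1.  PT_ENTRIES is SZ_2M / sizeof(u32) = 512Ki entries, so one
flat table covers 512Ki * 4K = 2GiB of GPU address space, matching the
aperture set up in etnaviv_iommu_domain_alloc() below.
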
> +
> +static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
> +{
> +       u32 *p;
> +       int ret, i;
> +
> +       etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
> +                                                 SZ_4K,
> +                                                 &etnaviv_domain->bad_page_dma,
> +                                                 GFP_KERNEL);
> +       if (!etnaviv_domain->bad_page_cpu)
> +               return -ENOMEM;
> +
> +       p = etnaviv_domain->bad_page_cpu;
> +       for (i = 0; i < SZ_4K / 4; i++)
> +               *p++ = 0xdead55aa;
> +
> +       ret = pgtable_alloc(&etnaviv_domain->pgtable, PT_SIZE);
> +       if (ret < 0) {
> +               dma_free_coherent(etnaviv_domain->dev, SZ_4K,
> +                                 etnaviv_domain->bad_page_cpu,
> +                                 etnaviv_domain->bad_page_dma);
> +               return ret;
> +       }
> +
> +       for (i = 0; i < PT_ENTRIES; i++)
> +               etnaviv_domain->pgtable.pgtable[i] =
> +                       etnaviv_domain->bad_page_dma;
> +
> +       spin_lock_init(&etnaviv_domain->map_lock);
> +
> +       return 0;
> +}
> +
> +static void etnaviv_domain_free(struct iommu_domain *domain)
> +{
> +       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
> +
> +       pgtable_free(&etnaviv_domain->pgtable, PT_SIZE);
> +       dma_free_coherent(etnaviv_domain->dev, SZ_4K,
> +                         etnaviv_domain->bad_page_cpu,
> +                         etnaviv_domain->bad_page_dma);
> +       kfree(etnaviv_domain);
> +}
> +
> +static int etnaviv_iommu_map(struct iommu_domain *domain, unsigned long iova,
> +          phys_addr_t paddr, size_t size, int prot)
> +{
> +       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
> +
> +       if (size != SZ_4K)
> +               return -EINVAL;
> +
> +       spin_lock(&etnaviv_domain->map_lock);
> +       pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
> +       spin_unlock(&etnaviv_domain->map_lock);
> +
> +       return 0;
> +}
> +
> +static size_t etnaviv_iommu_unmap(struct iommu_domain *domain,
> +       unsigned long iova, size_t size)
> +{
> +       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
> +
> +       if (size != SZ_4K)
> +               return 0;
> +
> +       spin_lock(&etnaviv_domain->map_lock);
> +       pgtable_write(&etnaviv_domain->pgtable, iova,
> +                     etnaviv_domain->bad_page_dma);
> +       spin_unlock(&etnaviv_domain->map_lock);
> +
> +       return SZ_4K;
> +}
> +
> +static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
> +       dma_addr_t iova)
> +{
> +       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
> +
> +       return pgtable_read(&etnaviv_domain->pgtable, iova);
> +}
> +
> +static struct iommu_ops etnaviv_iommu_ops = {
> +       .domain_free = etnaviv_domain_free,
> +       .map = etnaviv_iommu_map,
> +       .unmap = etnaviv_iommu_unmap,
> +       .iova_to_phys = etnaviv_iommu_iova_to_phys,
> +       .pgsize_bitmap = SZ_4K,
> +};
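
With .pgsize_bitmap = SZ_4K, the IOMMU core chops every iommu_map() and
iommu_unmap() request into 4K-sized calls into these ops, which is why
the callbacks above can simply reject any size other than SZ_4K: a 16K
mapping arrives here as four consecutive map(domain, iova + n * SZ_4K,
paddr + n * SZ_4K, SZ_4K, prot) calls.
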
> +
> +void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
> +       struct iommu_domain *domain)
> +{
> +       struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);
> +       u32 pgtable;
> +
> +       /* set page table address in MC */
> +       pgtable = (u32)etnaviv_domain->pgtable.paddr;
> +
> +       gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
> +       gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
> +       gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
> +       gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
> +       gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
> +}
> +
> +struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
> +{
> +       struct etnaviv_iommu_domain *etnaviv_domain;
> +       int ret;
> +
> +       etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
> +       if (!etnaviv_domain)
> +               return NULL;
> +
> +       etnaviv_domain->dev = gpu->dev;
> +
> +       etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
> +       etnaviv_domain->domain.ops = &etnaviv_iommu_ops;
> +       etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
> +       etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
> +
> +       ret = __etnaviv_iommu_init(etnaviv_domain);
> +       if (ret)
> +               goto out_free;
> +
> +       return &etnaviv_domain->domain;
> +
> +out_free:
> +       kfree(etnaviv_domain);
> +       return NULL;
> +}
> diff --git a/drivers/staging/etnaviv/etnaviv_iommu.h b/drivers/staging/etnaviv/etnaviv_iommu.h
> new file mode 100644
> index 000000000000..cf45503f6b6f
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_iommu.h
> @@ -0,0 +1,28 @@
> +/*
> + * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@xxxxxxxxx>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __ETNAVIV_IOMMU_H__
> +#define __ETNAVIV_IOMMU_H__
> +
> +#include <linux/iommu.h>
> +struct etnaviv_gpu;
> +
> +struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu);
> +void etnaviv_iommu_domain_restore(struct etnaviv_gpu *gpu,
> +       struct iommu_domain *domain);
> +struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
> +
> +#endif /* __ETNAVIV_IOMMU_H__ */
> diff --git a/drivers/staging/etnaviv/etnaviv_iommu_v2.c b/drivers/staging/etnaviv/etnaviv_iommu_v2.c
> new file mode 100644
> index 000000000000..fbb4aed3dc80
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_iommu_v2.c
> @@ -0,0 +1,33 @@
> +/*
> + * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@xxxxxxxxx>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include <linux/iommu.h>
> +#include <linux/platform_device.h>
> +#include <linux/sizes.h>
> +#include <linux/slab.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/bitops.h>
> +
> +#include "etnaviv_gpu.h"
> +#include "etnaviv_iommu.h"
> +#include "state_hi.xml.h"
> +
> +
> +struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu)
> +{
> +       /* TODO */
> +       return NULL;
> +}
> diff --git a/drivers/staging/etnaviv/etnaviv_iommu_v2.h b/drivers/staging/etnaviv/etnaviv_iommu_v2.h
> new file mode 100644
> index 000000000000..603ea41c5389
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_iommu_v2.h
> @@ -0,0 +1,25 @@
> +/*
> + * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@xxxxxxxxx>
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __ETNAVIV_IOMMU_V2_H__
> +#define __ETNAVIV_IOMMU_V2_H__
> +
> +#include <linux/iommu.h>
> +struct etnaviv_gpu;
> +
> +struct iommu_domain *etnaviv_iommu_v2_domain_alloc(struct etnaviv_gpu *gpu);
> +
> +#endif /* __ETNAVIV_IOMMU_V2_H__ */
> diff --git a/drivers/staging/etnaviv/etnaviv_mmu.c b/drivers/staging/etnaviv/etnaviv_mmu.c
> new file mode 100644
> index 000000000000..ca317f633970
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_mmu.c
> @@ -0,0 +1,282 @@
> +/*
> + * Copyright (C) 2015 Etnaviv Project
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#include "etnaviv_drv.h"
> +#include "etnaviv_gem.h"
> +#include "etnaviv_mmu.h"
> +
> +static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
> +               unsigned long iova, int flags, void *arg)
> +{
> +       DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
> +       return 0;
> +}
> +
> +int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
> +               struct sg_table *sgt, unsigned len, int prot)
> +{
> +       struct iommu_domain *domain = iommu->domain;
> +       struct scatterlist *sg;
> +       unsigned int da = iova;
> +       unsigned int i, j;
> +       int ret;
> +
> +       if (!domain || !sgt)
> +               return -EINVAL;
> +
> +       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
> +               u32 pa = sg_dma_address(sg) - sg->offset;
> +               size_t bytes = sg_dma_len(sg) + sg->offset;
> +
> +               VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
> +
> +               ret = iommu_map(domain, da, pa, bytes, prot);
> +               if (ret)
> +                       goto fail;
> +
> +               da += bytes;
> +       }
> +
> +       return 0;
> +
> +fail:
> +       da = iova;
> +
> +       for_each_sg(sgt->sgl, sg, i, j) {
> +               size_t bytes = sg_dma_len(sg) + sg->offset;
> +
> +               iommu_unmap(domain, da, bytes);
> +               da += bytes;
> +       }
> +       return ret;
> +}
> +
> +int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
> +               struct sg_table *sgt, unsigned len)
> +{
> +       struct iommu_domain *domain = iommu->domain;
> +       struct scatterlist *sg;
> +       unsigned int da = iova;
> +       int i;
> +
> +       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
> +               size_t bytes = sg_dma_len(sg) + sg->offset;
> +               size_t unmapped;
> +
> +               unmapped = iommu_unmap(domain, da, bytes);
> +               if (unmapped < bytes)
> +                       return unmapped;
> +
> +               VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
> +
> +               BUG_ON(!PAGE_ALIGNED(bytes));
> +
> +               da += bytes;
> +       }
> +
> +       return 0;
> +}
> +
> +int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
> +       struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
> +       struct etnaviv_vram_mapping **out_mapping)
> +{
> +       struct etnaviv_drm_private *priv = etnaviv_obj->base.dev->dev_private;
> +       struct sg_table *sgt = etnaviv_obj->sgt;
> +       struct etnaviv_vram_mapping *mapping, *free = NULL;
> +       struct drm_mm_node *node;
> +       int ret;
> +
> +       mapping = kzalloc(sizeof(*mapping), GFP_KERNEL);
> +       if (!mapping)
> +               return -ENOMEM;
> +
> +       INIT_LIST_HEAD(&mapping->scan_node);
> +       mapping->object = etnaviv_obj;
> +       mapping->mmu = mmu;
> +
> +       /* v1 MMU can optimize single entry (contiguous) scatterlists */
> +       if (sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
> +               u32 iova;
> +
> +               iova = sg_dma_address(sgt->sgl) - memory_base;
> +               if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
> +                       mapping->iova = iova;
> +                       list_add_tail(&mapping->obj_node,
> +                                     &etnaviv_obj->vram_list);
> +                       if (out_mapping)
> +                               *out_mapping = mapping;
> +                       return 0;
> +               }
> +       }
> +
> +       node = &mapping->vram_node;
> +       while (1) {
> +               struct etnaviv_gem_object *o;
> +               struct etnaviv_vram_mapping *m, *n;
> +               struct list_head list;
> +               bool found;
> +
> +               ret = drm_mm_insert_node_in_range(&mmu->mm, node,
> +                       etnaviv_obj->base.size, 0, mmu->last_iova, ~0UL,
> +                       DRM_MM_SEARCH_DEFAULT);
> +
> +               if (ret != -ENOSPC)
> +                       break;
> +
> +               /*
> +                * If we did not search from the start of the MMU region,
> +                * try again in case there are free slots.
> +                */
> +               if (mmu->last_iova) {
> +                       mmu->last_iova = 0;
> +                       mmu->need_flush = true;
> +                       continue;
> +               }
> +
> +               /* Try to retire some entries */
> +               drm_mm_init_scan(&mmu->mm, etnaviv_obj->base.size, 0, 0);
> +
> +               found = false;
> +               INIT_LIST_HEAD(&list);
> +               list_for_each_entry(o, &priv->inactive_list, mm_list) {
> +                       free = etnaviv_gem_get_vram_mapping(o, mmu);
> +                       if (!free)
> +                               continue;
> +
> +                       /*
> +                        * If this vram node has not been used, skip this.
> +                        */
> +                       if (!free->vram_node.mm)
> +                               continue;
> +
> +                       /*
> +                        * If it's on the submit list, then it is part of
> +                        * a submission, and we want to keep its entry.
> +                        */
> +                       if (!list_empty(&o->submit_entry))
> +                               continue;
> +
> +                       list_add(&free->scan_node, &list);
> +                       if (drm_mm_scan_add_block(&free->vram_node)) {
> +                               found = true;
> +                               break;
> +                       }
> +               }
> +
> +               if (!found) {
> +                       /* Nothing found, clean up and fail */
> +                       list_for_each_entry_safe(m, n, &list, scan_node)
> +                               BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
> +                       break;
> +               }
> +
> +               /*
> +                * drm_mm does not allow any other operations while
> +                * scanning, so we have to remove all blocks first.
> +                * If drm_mm_scan_remove_block() returns false, we
> +                * can leave the block pinned.
> +                */
> +               list_for_each_entry_safe(m, n, &list, scan_node)
> +                       if (!drm_mm_scan_remove_block(&m->vram_node))
> +                               list_del_init(&m->scan_node);
> +
> +               list_for_each_entry_safe(m, n, &list, scan_node) {
> +                       list_del_init(&m->scan_node);
> +                       etnaviv_iommu_unmap_gem(m);
> +               }
> +
> +               /*
> +                * We removed enough mappings so that the new allocation will
> +                * succeed.  Ensure that the MMU will be flushed and retry
> +                * the allocation one more time.
> +                */
> +               mmu->need_flush = true;
> +       }
> +
> +       if (ret < 0) {
> +               kfree(mapping);
> +               return ret;
> +       }
> +
> +       mmu->last_iova = node->start + etnaviv_obj->base.size;
> +       mapping->iova = node->start;
> +       ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
> +                               IOMMU_READ | IOMMU_WRITE);
> +
> +       if (ret < 0) {
> +               drm_mm_remove_node(node);
> +               kfree(mapping);
> +               return ret;
> +       }
> +
> +       list_add_tail(&mapping->obj_node, &etnaviv_obj->vram_list);
> +       if (out_mapping)
> +               *out_mapping = mapping;
> +
> +       return ret;
> +}
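
A concrete trace of the contiguous fast path near the top of this
function: with memory_base = PHYS_OFFSET, a single-entry scatterlist at
bus address memory_base + 0x1000 with a 4K length gets iova 0x1000
directly -- no page table entry, no drm_mm node, no MMU flush.  The
"iova < 0x80000000 - sg_dma_len()" check just ensures the whole buffer
fits inside the v1 MMU's 2GiB linear window; anything else falls through
to the drm_mm allocation and eviction-scan path below.
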
> +
> +void etnaviv_iommu_unmap_gem(struct etnaviv_vram_mapping *mapping)
> +{
> +       struct etnaviv_iommu *mmu;
> +       struct etnaviv_gem_object *etnaviv_obj;
> +
> +       if (!mapping)
> +               return;
> +
> +       mmu = mapping->mmu;
> +
> +       /* If the vram node is on the mm, unmap and remove the node */
> +       if (mapping->vram_node.mm == &mmu->mm) {
> +               etnaviv_obj = mapping->object;
> +               etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
> +                                   etnaviv_obj->sgt, etnaviv_obj->base.size);
> +               drm_mm_remove_node(&mapping->vram_node);
> +       }
> +
> +       list_del(&mapping->obj_node);
> +       kfree(mapping);
> +}
> +
> +void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
> +{
> +       drm_mm_takedown(&mmu->mm);
> +       iommu_domain_free(mmu->domain);
> +       kfree(mmu);
> +}
> +
> +struct etnaviv_iommu *etnaviv_iommu_new(struct device *dev,
> +       struct iommu_domain *domain, enum etnaviv_iommu_version version)
> +{
> +       struct etnaviv_iommu *mmu;
> +
> +       mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
> +       if (!mmu)
> +               return ERR_PTR(-ENOMEM);
> +
> +       mmu->domain = domain;
> +       mmu->dev = dev;
> +       mmu->version = version;
> +
> +       drm_mm_init(&mmu->mm, domain->geometry.aperture_start,
> +                   domain->geometry.aperture_end -
> +                     domain->geometry.aperture_start + 1);
> +
> +       iommu_set_fault_handler(domain, etnaviv_fault_handler, dev);
> +
> +       return mmu;
> +}
> diff --git a/drivers/staging/etnaviv/etnaviv_mmu.h b/drivers/staging/etnaviv/etnaviv_mmu.h
> new file mode 100644
> index 000000000000..444ef296d2b4
> --- /dev/null
> +++ b/drivers/staging/etnaviv/etnaviv_mmu.h
> @@ -0,0 +1,58 @@
> +/*
> + * Copyright (C) 2015 Etnaviv Project
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __ETNAVIV_MMU_H__
> +#define __ETNAVIV_MMU_H__
> +
> +#include <linux/iommu.h>
> +
> +enum etnaviv_iommu_version {
> +       ETNAVIV_IOMMU_V1 = 0,
> +       ETNAVIV_IOMMU_V2,
> +};
> +
> +struct etnaviv_vram_mapping;
> +
> +struct etnaviv_iommu {
> +       struct device *dev;
> +       struct iommu_domain *domain;
> +
> +       enum etnaviv_iommu_version version;
> +
> +       /* memory manager for GPU address area */
> +       struct drm_mm mm;
> +       u32 last_iova;
> +       bool need_flush;
> +};
> +
> +struct etnaviv_gem_object;
> +
> +int etnaviv_iommu_attach(struct etnaviv_iommu *iommu, const char **names,
> +       int cnt);
> +int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
> +       struct sg_table *sgt, unsigned len, int prot);
> +int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
> +       struct sg_table *sgt, unsigned len);
> +int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
> +       struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
> +       struct etnaviv_vram_mapping **mapping);
> +void etnaviv_iommu_unmap_gem(struct etnaviv_vram_mapping *mapping);
> +void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
> +
> +struct etnaviv_iommu *etnaviv_iommu_new(struct device *dev,
> +       struct iommu_domain *domain, enum etnaviv_iommu_version version);
> +
> +#endif /* __ETNAVIV_MMU_H__ */
> diff --git a/drivers/staging/etnaviv/state.xml.h b/drivers/staging/etnaviv/state.xml.h
> new file mode 100644
> index 000000000000..368218304566
> --- /dev/null
> +++ b/drivers/staging/etnaviv/state.xml.h
> @@ -0,0 +1,351 @@
> +#ifndef STATE_XML
> +#define STATE_XML
> +
> +/* Autogenerated file, DO NOT EDIT manually!
> +
> +This file was generated by the rules-ng-ng headergen tool in this git repository:
> +http://0x04.net/cgit/index.cgi/rules-ng-ng
> +git clone git://0x04.net/rules-ng-ng
> +
> +The rules-ng-ng source files this header was generated from are:
> +- state.xml    (  18882 bytes, from 2015-03-25 11:42:32)
> +- common.xml   (  18437 bytes, from 2015-03-25 11:27:41)
> +- state_hi.xml (  23420 bytes, from 2015-03-25 11:47:21)
> +- state_2d.xml (  51549 bytes, from 2015-03-25 11:25:06)
> +- state_3d.xml (  54600 bytes, from 2015-03-25 11:25:19)
> +- state_vg.xml (   5973 bytes, from 2015-03-25 11:26:01)
> +
> +Copyright (C) 2015
> +*/
> +
> +
> +#define VARYING_COMPONENT_USE_UNUSED                           0x00000000
> +#define VARYING_COMPONENT_USE_USED                             0x00000001
> +#define VARYING_COMPONENT_USE_POINTCOORD_X                     0x00000002
> +#define VARYING_COMPONENT_USE_POINTCOORD_Y                     0x00000003
> +#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK           0x000000ff
> +#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT          0
> +#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x)              (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK)
> +#define VIVS_FE                                                        0x00000000
> +
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0)                     (0x00000600 + 0x4*(i0))
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG__ESIZE                   0x00000004
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN                     0x00000010
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK               0x0000000f
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT              0
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_BYTE                        0x00000000
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_BYTE       0x00000001
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_SHORT               0x00000002
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_SHORT      0x00000003
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT                 0x00000004
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT                0x00000005
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FLOAT               0x00000008
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_HALF_FLOAT          0x00000009
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FIXED               0x0000000b
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT_10_10_10_2      0x0000000c
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT_10_10_10_2     0x0000000d
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK             0x00000030
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT            4
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x)                        (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK)
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NONCONSECUTIVE           0x00000080
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK             0x00000700
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT            8
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM(x)                        (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_STREAM__MASK)
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK                        0x00003000
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT               12
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM(x)                   (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_NUM__MASK)
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__MASK          0x0000c000
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE__SHIFT         14
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_OFF            0x00000000
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_NORMALIZE_ON             0x00008000
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK              0x00ff0000
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT             16
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_START(x)                 (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_START__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_START__MASK)
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK                        0xff000000
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT               24
> +#define VIVS_FE_VERTEX_ELEMENT_CONFIG_END(x)                   (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_END__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_END__MASK)
> +
> +#define VIVS_FE_CMD_STREAM_BASE_ADDR                           0x00000640
> +
> +#define VIVS_FE_INDEX_STREAM_BASE_ADDR                         0x00000644
> +
> +#define VIVS_FE_INDEX_STREAM_CONTROL                           0x00000648
> +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__MASK                        0x00000003
> +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE__SHIFT               0
> +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR                0x00000000
> +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT       0x00000001
> +#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT         0x00000002
> +
> +#define VIVS_FE_VERTEX_STREAM_BASE_ADDR                                0x0000064c
> +
> +#define VIVS_FE_VERTEX_STREAM_CONTROL                          0x00000650
> +
> +#define VIVS_FE_COMMAND_ADDRESS                                        0x00000654
> +
> +#define VIVS_FE_COMMAND_CONTROL                                        0x00000658
> +#define VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK                 0x0000ffff
> +#define VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT                        0
> +#define VIVS_FE_COMMAND_CONTROL_PREFETCH(x)                    (((x) << VIVS_FE_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_FE_COMMAND_CONTROL_PREFETCH__MASK)
> +#define VIVS_FE_COMMAND_CONTROL_ENABLE                         0x00010000
> +
> +#define VIVS_FE_DMA_STATUS                                     0x0000065c
> +
> +#define VIVS_FE_DMA_DEBUG_STATE                                        0x00000660
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__MASK                        0x0000001f
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE__SHIFT               0
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_IDLE                 0x00000000
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DEC                  0x00000001
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR0                 0x00000002
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD0                        0x00000003
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_ADR1                 0x00000004
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LOAD1                        0x00000005
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DADR                        0x00000006
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCMD                        0x00000007
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DCNTL               0x00000008
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_3DIDXCNTL            0x00000009
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_INITREQDMA           0x0000000a
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAWIDX              0x0000000b
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_DRAW                 0x0000000c
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT0              0x0000000d
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DRECT1              0x0000000e
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA0              0x0000000f
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_2DDATA1              0x00000010
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAITFIFO             0x00000011
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_WAIT                 0x00000012
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_LINK                 0x00000013
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_END                  0x00000014
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_STATE_STALL                        0x00000015
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__MASK            0x00000300
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE__SHIFT           8
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_IDLE             0x00000000
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_START            0x00000100
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_REQ              0x00000200
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_DMA_STATE_END              0x00000300
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__MASK          0x00000c00
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE__SHIFT         10
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_IDLE           0x00000000
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_RAMVALID       0x00000400
> +#define VIVS_FE_DMA_DEBUG_STATE_CMD_FETCH_STATE_VALID          0x00000800
> +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__MASK            0x00003000
> +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE__SHIFT           12
> +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_IDLE             0x00000000
> +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_WAITIDX          0x00001000
> +#define VIVS_FE_DMA_DEBUG_STATE_REQ_DMA_STATE_CAL              0x00002000
> +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__MASK                        0x0000c000
> +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE__SHIFT               14
> +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDLE                 0x00000000
> +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_LDADR                        0x00004000
> +#define VIVS_FE_DMA_DEBUG_STATE_CAL_STATE_IDXCALC              0x00008000
> +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__MASK             0x00030000
> +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE__SHIFT            16
> +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_IDLE              0x00000000
> +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_CKCACHE           0x00010000
> +#define VIVS_FE_DMA_DEBUG_STATE_VE_REQ_STATE_MISS              0x00020000
> +
> +#define VIVS_FE_DMA_ADDRESS                                    0x00000664
> +
> +#define VIVS_FE_DMA_LOW                                                0x00000668
> +
> +#define VIVS_FE_DMA_HIGH                                       0x0000066c
> +
> +#define VIVS_FE_AUTO_FLUSH                                     0x00000670
> +
> +#define VIVS_FE_UNK00678                                       0x00000678
> +
> +#define VIVS_FE_UNK0067C                                       0x0000067c
> +
> +#define VIVS_FE_VERTEX_STREAMS(i0)                            (0x00000000 + 0x4*(i0))
> +#define VIVS_FE_VERTEX_STREAMS__ESIZE                          0x00000004
> +#define VIVS_FE_VERTEX_STREAMS__LEN                            0x00000008
> +
> +#define VIVS_FE_VERTEX_STREAMS_BASE_ADDR(i0)                  (0x00000680 + 0x4*(i0))
> +
> +#define VIVS_FE_VERTEX_STREAMS_CONTROL(i0)                    (0x000006a0 + 0x4*(i0))
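
A note for reviewers new to these rules-ng-ng headers: for register
arrays, FOO(i0) expands to the address of element i0, FOO__ESIZE is the
element stride in bytes, and FOO__LEN the number of elements. A
bounds-checked write would look something like this (sketch only;
'mmio' and the writel() wiring are assumed):

    if (i < VIVS_FE_VERTEX_STREAMS__LEN)
            writel(stream_base, mmio + VIVS_FE_VERTEX_STREAMS_BASE_ADDR(i));
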
> +
> +#define VIVS_FE_UNK00700(i0)                                  (0x00000700 + 0x4*(i0))
> +#define VIVS_FE_UNK00700__ESIZE                                        0x00000004
> +#define VIVS_FE_UNK00700__LEN                                  0x00000010
> +
> +#define VIVS_FE_UNK00740(i0)                                  (0x00000740 + 0x4*(i0))
> +#define VIVS_FE_UNK00740__ESIZE                                        0x00000004
> +#define VIVS_FE_UNK00740__LEN                                  0x00000010
> +
> +#define VIVS_FE_UNK00780(i0)                                  (0x00000780 + 0x4*(i0))
> +#define VIVS_FE_UNK00780__ESIZE                                        0x00000004
> +#define VIVS_FE_UNK00780__LEN                                  0x00000010
> +
> +#define VIVS_GL                                                        0x00000000
> +
> +#define VIVS_GL_PIPE_SELECT                                    0x00003800
> +#define VIVS_GL_PIPE_SELECT_PIPE__MASK                         0x00000001
> +#define VIVS_GL_PIPE_SELECT_PIPE__SHIFT                                0
> +#define VIVS_GL_PIPE_SELECT_PIPE(x)                            (((x) << VIVS_GL_PIPE_SELECT_PIPE__SHIFT) & VIVS_GL_PIPE_SELECT_PIPE__MASK)
> +
> +#define VIVS_GL_EVENT                                          0x00003804
> +#define VIVS_GL_EVENT_EVENT_ID__MASK                           0x0000001f
> +#define VIVS_GL_EVENT_EVENT_ID__SHIFT                          0
> +#define VIVS_GL_EVENT_EVENT_ID(x)                              (((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK)
> +#define VIVS_GL_EVENT_FROM_FE                                  0x00000020
> +#define VIVS_GL_EVENT_FROM_PE                                  0x00000040
> +#define VIVS_GL_EVENT_SOURCE__MASK                             0x00001f00
> +#define VIVS_GL_EVENT_SOURCE__SHIFT                            8
> +#define VIVS_GL_EVENT_SOURCE(x)                                        (((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK)
> +
> +#define VIVS_GL_SEMAPHORE_TOKEN                                        0x00003808
> +#define VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK                     0x0000001f
> +#define VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT                    0
> +#define VIVS_GL_SEMAPHORE_TOKEN_FROM(x)                                (((x) << VIVS_GL_SEMAPHORE_TOKEN_FROM__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_FROM__MASK)
> +#define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK                       0x00001f00
> +#define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT                      8
> +#define VIVS_GL_SEMAPHORE_TOKEN_TO(x)                          (((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK)
> +
> +#define VIVS_GL_FLUSH_CACHE                                    0x0000380c
> +#define VIVS_GL_FLUSH_CACHE_DEPTH                              0x00000001
> +#define VIVS_GL_FLUSH_CACHE_COLOR                              0x00000002
> +#define VIVS_GL_FLUSH_CACHE_TEXTURE                            0x00000004
> +#define VIVS_GL_FLUSH_CACHE_PE2D                               0x00000008
> +#define VIVS_GL_FLUSH_CACHE_TEXTUREVS                          0x00000010
> +#define VIVS_GL_FLUSH_CACHE_SHADER_L1                          0x00000020
> +#define VIVS_GL_FLUSH_CACHE_SHADER_L2                          0x00000040
> +
> +#define VIVS_GL_FLUSH_MMU                                      0x00003810
> +#define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU                          0x00000001
> +#define VIVS_GL_FLUSH_MMU_FLUSH_UNK1                           0x00000002
> +#define VIVS_GL_FLUSH_MMU_FLUSH_UNK2                           0x00000004
> +#define VIVS_GL_FLUSH_MMU_FLUSH_PEMMU                          0x00000008
> +#define VIVS_GL_FLUSH_MMU_FLUSH_UNK4                           0x00000010
> +
> +#define VIVS_GL_VERTEX_ELEMENT_CONFIG                          0x00003814
> +
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG                            0x00003818
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__MASK         0x00000003
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES__SHIFT                0
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_NONE          0x00000000
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_2X            0x00000001
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_4X            0x00000002
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_SAMPLES_MASK          0x00000008
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK         0x000000f0
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT                4
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES(x)            (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES__MASK)
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_MSAA_ENABLES_MASK          0x00000100
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK                        0x00007000
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT               12
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12(x)                   (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12__MASK)
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK12_MASK                 0x00008000
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK                        0x00030000
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT               16
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16(x)                   (((x) << VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__SHIFT) & VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16__MASK)
> +#define VIVS_GL_MULTI_SAMPLE_CONFIG_UNK16_MASK                 0x00080000
> +
> +#define VIVS_GL_VARYING_TOTAL_COMPONENTS                       0x0000381c
> +#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK             0x000000ff
> +#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT            0
> +#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x)                        (((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK)
> +
> +#define VIVS_GL_VARYING_NUM_COMPONENTS                         0x00003820
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK              0x00000007
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT             0
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0(x)                 (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK)
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK              0x00000070
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT             4
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1(x)                 (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK)
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK              0x00000700
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT             8
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2(x)                 (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK)
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK              0x00007000
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT             12
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3(x)                 (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK)
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK              0x00070000
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT             16
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4(x)                 (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK)
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK              0x00700000
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT             20
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5(x)                 (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK)
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK              0x07000000
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT             24
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6(x)                 (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK)
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK              0x70000000
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT             28
> +#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7(x)                 (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK)
> +
> +#define VIVS_GL_VARYING_COMPONENT_USE(i0)                     (0x00003828 + 0x4*(i0))
> +#define VIVS_GL_VARYING_COMPONENT_USE__ESIZE                   0x00000004
> +#define VIVS_GL_VARYING_COMPONENT_USE__LEN                     0x00000002
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK              0x00000003
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT             0
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP0(x)                 (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP0__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP0__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK              0x0000000c
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT             2
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP1(x)                 (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP1__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP1__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK              0x00000030
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT             4
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP2(x)                 (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP2__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP2__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK              0x000000c0
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT             6
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP3(x)                 (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP3__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP3__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK              0x00000300
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT             8
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP4(x)                 (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP4__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP4__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK              0x00000c00
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT             10
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP5(x)                 (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP5__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP5__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK              0x00003000
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT             12
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP6(x)                 (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP6__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP6__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK              0x0000c000
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT             14
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP7(x)                 (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP7__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP7__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK              0x00030000
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT             16
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP8(x)                 (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP8__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP8__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK              0x000c0000
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT             18
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP9(x)                 (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP9__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP9__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK             0x00300000
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT            20
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP10(x)                        (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP10__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP10__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK             0x00c00000
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT            22
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP11(x)                        (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP11__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP11__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK             0x03000000
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT            24
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP12(x)                        (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP12__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP12__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK             0x0c000000
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT            26
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP13(x)                        (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP13__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP13__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK             0x30000000
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT            28
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP14(x)                        (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP14__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP14__MASK)
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK             0xc0000000
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT            30
> +#define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x)                        (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK)
> +
> +#define VIVS_GL_UNK03834                                       0x00003834
> +
> +#define VIVS_GL_UNK03838                                       0x00003838
> +
> +#define VIVS_GL_API_MODE                                       0x0000384c
> +#define VIVS_GL_API_MODE_OPENGL                                        0x00000000
> +#define VIVS_GL_API_MODE_OPENVG                                        0x00000001
> +#define VIVS_GL_API_MODE_OPENCL                                        0x00000002
> +
> +#define VIVS_GL_CONTEXT_POINTER                                        0x00003850
> +
> +#define VIVS_GL_UNK03A00                                       0x00003a00
> +
> +#define VIVS_GL_STALL_TOKEN                                    0x00003c00
> +#define VIVS_GL_STALL_TOKEN_FROM__MASK                         0x0000001f
> +#define VIVS_GL_STALL_TOKEN_FROM__SHIFT                                0
> +#define VIVS_GL_STALL_TOKEN_FROM(x)                            (((x) << VIVS_GL_STALL_TOKEN_FROM__SHIFT) & VIVS_GL_STALL_TOKEN_FROM__MASK)
> +#define VIVS_GL_STALL_TOKEN_TO__MASK                           0x00001f00
> +#define VIVS_GL_STALL_TOKEN_TO__SHIFT                          8
> +#define VIVS_GL_STALL_TOKEN_TO(x)                              (((x) << VIVS_GL_STALL_TOKEN_TO__SHIFT) & VIVS_GL_STALL_TOKEN_TO__MASK)
> +#define VIVS_GL_STALL_TOKEN_FLIP0                              0x40000000
> +#define VIVS_GL_STALL_TOKEN_FLIP1                              0x80000000
> +
> +#define VIVS_DUMMY                                             0x00000000
> +
> +#define VIVS_DUMMY_DUMMY                                       0x0003fffc
> +
> +
> +#endif /* STATE_XML */
> diff --git a/drivers/staging/etnaviv/state_hi.xml.h b/drivers/staging/etnaviv/state_hi.xml.h
> new file mode 100644
> index 000000000000..0064f2640396
> --- /dev/null
> +++ b/drivers/staging/etnaviv/state_hi.xml.h
> @@ -0,0 +1,407 @@
> +#ifndef STATE_HI_XML
> +#define STATE_HI_XML
> +
> +/* Autogenerated file, DO NOT EDIT manually!
> +
> +This file was generated by the rules-ng-ng headergen tool in this git repository:
> +http://0x04.net/cgit/index.cgi/rules-ng-ng
> +git clone git://0x04.net/rules-ng-ng
> +
> +The rules-ng-ng source files this header was generated from are:
> +- state_hi.xml (  23420 bytes, from 2015-03-25 11:47:21)
> +- common.xml   (  18437 bytes, from 2015-03-25 11:27:41)
> +
> +Copyright (C) 2015
> +*/
> +
> +
> +#define MMU_EXCEPTION_SLAVE_NOT_PRESENT                                0x00000001
> +#define MMU_EXCEPTION_PAGE_NOT_PRESENT                         0x00000002
> +#define MMU_EXCEPTION_WRITE_VIOLATION                          0x00000003
> +#define VIVS_HI                                                        0x00000000
> +
> +#define VIVS_HI_CLOCK_CONTROL                                  0x00000000
> +#define VIVS_HI_CLOCK_CONTROL_CLK3D_DIS                                0x00000001
> +#define VIVS_HI_CLOCK_CONTROL_CLK2D_DIS                                0x00000002
> +#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK                 0x000001fc
> +#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT                        2
> +#define VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(x)                    (((x) << VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__SHIFT) & VIVS_HI_CLOCK_CONTROL_FSCALE_VAL__MASK)
> +#define VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD                  0x00000200
> +#define VIVS_HI_CLOCK_CONTROL_DISABLE_RAM_CLK_GATING           0x00000400
> +#define VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS          0x00000800
> +#define VIVS_HI_CLOCK_CONTROL_SOFT_RESET                       0x00001000
> +#define VIVS_HI_CLOCK_CONTROL_IDLE_3D                          0x00010000
> +#define VIVS_HI_CLOCK_CONTROL_IDLE_2D                          0x00020000
> +#define VIVS_HI_CLOCK_CONTROL_IDLE_VG                          0x00040000
> +#define VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU                      0x00080000
> +#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK           0x00f00000
> +#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT          20
> +#define VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE(x)              (((x) << VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__SHIFT) & VIVS_HI_CLOCK_CONTROL_DEBUG_PIXEL_PIPE__MASK)
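
(Reviewer aside: the generated FIELD(x) macros shift and then mask, so
values compose with plain OR and cannot spill into neighbouring fields.
A minimal sketch, with the mmio accessor assumed:)

    /* request a core clock divider and latch it with FSCALE_CMD_LOAD */
    u32 clock = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40) |
                VIVS_HI_CLOCK_CONTROL_FSCALE_CMD_LOAD;

    writel(clock, mmio + VIVS_HI_CLOCK_CONTROL);
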
> +
> +#define VIVS_HI_IDLE_STATE                                     0x00000004
> +#define VIVS_HI_IDLE_STATE_FE                                  0x00000001
> +#define VIVS_HI_IDLE_STATE_DE                                  0x00000002
> +#define VIVS_HI_IDLE_STATE_PE                                  0x00000004
> +#define VIVS_HI_IDLE_STATE_SH                                  0x00000008
> +#define VIVS_HI_IDLE_STATE_PA                                  0x00000010
> +#define VIVS_HI_IDLE_STATE_SE                                  0x00000020
> +#define VIVS_HI_IDLE_STATE_RA                                  0x00000040
> +#define VIVS_HI_IDLE_STATE_TX                                  0x00000080
> +#define VIVS_HI_IDLE_STATE_VG                                  0x00000100
> +#define VIVS_HI_IDLE_STATE_IM                                  0x00000200
> +#define VIVS_HI_IDLE_STATE_FP                                  0x00000400
> +#define VIVS_HI_IDLE_STATE_TS                                  0x00000800
> +#define VIVS_HI_IDLE_STATE_AXI_LP                              0x80000000
> +
> +#define VIVS_HI_AXI_CONFIG                                     0x00000008
> +#define VIVS_HI_AXI_CONFIG_AWID__MASK                          0x0000000f
> +#define VIVS_HI_AXI_CONFIG_AWID__SHIFT                         0
> +#define VIVS_HI_AXI_CONFIG_AWID(x)                             (((x) << VIVS_HI_AXI_CONFIG_AWID__SHIFT) & VIVS_HI_AXI_CONFIG_AWID__MASK)
> +#define VIVS_HI_AXI_CONFIG_ARID__MASK                          0x000000f0
> +#define VIVS_HI_AXI_CONFIG_ARID__SHIFT                         4
> +#define VIVS_HI_AXI_CONFIG_ARID(x)                             (((x) << VIVS_HI_AXI_CONFIG_ARID__SHIFT) & VIVS_HI_AXI_CONFIG_ARID__MASK)
> +#define VIVS_HI_AXI_CONFIG_AWCACHE__MASK                       0x00000f00
> +#define VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT                      8
> +#define VIVS_HI_AXI_CONFIG_AWCACHE(x)                          (((x) << VIVS_HI_AXI_CONFIG_AWCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_AWCACHE__MASK)
> +#define VIVS_HI_AXI_CONFIG_ARCACHE__MASK                       0x0000f000
> +#define VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT                      12
> +#define VIVS_HI_AXI_CONFIG_ARCACHE(x)                          (((x) << VIVS_HI_AXI_CONFIG_ARCACHE__SHIFT) & VIVS_HI_AXI_CONFIG_ARCACHE__MASK)
> +
> +#define VIVS_HI_AXI_STATUS                                     0x0000000c
> +#define VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK                     0x0000000f
> +#define VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT                    0
> +#define VIVS_HI_AXI_STATUS_WR_ERR_ID(x)                                (((x) << VIVS_HI_AXI_STATUS_WR_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_WR_ERR_ID__MASK)
> +#define VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK                     0x000000f0
> +#define VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT                    4
> +#define VIVS_HI_AXI_STATUS_RD_ERR_ID(x)                                (((x) << VIVS_HI_AXI_STATUS_RD_ERR_ID__SHIFT) & VIVS_HI_AXI_STATUS_RD_ERR_ID__MASK)
> +#define VIVS_HI_AXI_STATUS_DET_WR_ERR                          0x00000100
> +#define VIVS_HI_AXI_STATUS_DET_RD_ERR                          0x00000200
> +
> +#define VIVS_HI_INTR_ACKNOWLEDGE                               0x00000010
> +#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK                        0x7fffffff
> +#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT               0
> +#define VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC(x)                   (((x) << VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__SHIFT) & VIVS_HI_INTR_ACKNOWLEDGE_INTR_VEC__MASK)
> +#define VIVS_HI_INTR_ACKNOWLEDGE_AXI_BUS_ERROR                 0x80000000
> +
> +#define VIVS_HI_INTR_ENBL                                      0x00000014
> +#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK                  0xffffffff
> +#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT                 0
> +#define VIVS_HI_INTR_ENBL_INTR_ENBL_VEC(x)                     (((x) << VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__SHIFT) & VIVS_HI_INTR_ENBL_INTR_ENBL_VEC__MASK)
> +
> +#define VIVS_HI_CHIP_IDENTITY                                  0x00000018
> +#define VIVS_HI_CHIP_IDENTITY_FAMILY__MASK                     0xff000000
> +#define VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT                    24
> +#define VIVS_HI_CHIP_IDENTITY_FAMILY(x)                                (((x) << VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
> +#define VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK                    0x00ff0000
> +#define VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT                   16
> +#define VIVS_HI_CHIP_IDENTITY_PRODUCT(x)                       (((x) << VIVS_HI_CHIP_IDENTITY_PRODUCT__SHIFT) & VIVS_HI_CHIP_IDENTITY_PRODUCT__MASK)
> +#define VIVS_HI_CHIP_IDENTITY_REVISION__MASK                   0x0000f000
> +#define VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT                  12
> +#define VIVS_HI_CHIP_IDENTITY_REVISION(x)                      (((x) << VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT) & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
> +
> +#define VIVS_HI_CHIP_FEATURE                                   0x0000001c
> +
> +#define VIVS_HI_CHIP_MODEL                                     0x00000020
> +
> +#define VIVS_HI_CHIP_REV                                       0x00000024
> +
> +#define VIVS_HI_CHIP_DATE                                      0x00000028
> +
> +#define VIVS_HI_CHIP_TIME                                      0x0000002c
> +
> +#define VIVS_HI_CHIP_MINOR_FEATURE_0                           0x00000034
> +
> +#define VIVS_HI_CACHE_CONTROL                                  0x00000038
> +
> +#define VIVS_HI_MEMORY_COUNTER_RESET                           0x0000003c
> +
> +#define VIVS_HI_PROFILE_READ_BYTES8                            0x00000040
> +
> +#define VIVS_HI_PROFILE_WRITE_BYTES8                           0x00000044
> +
> +#define VIVS_HI_CHIP_SPECS                                     0x00000048
> +#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK                  0x0000000f
> +#define VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT                 0
> +#define VIVS_HI_CHIP_SPECS_STREAM_COUNT(x)                     (((x) << VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
> +#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK                  0x000000f0
> +#define VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT                 4
> +#define VIVS_HI_CHIP_SPECS_REGISTER_MAX(x)                     (((x) << VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT) & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
> +#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK                  0x00000f00
> +#define VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT                 8
> +#define VIVS_HI_CHIP_SPECS_THREAD_COUNT(x)                     (((x) << VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
> +#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK             0x0001f000
> +#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT            12
> +#define VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE(x)                        (((x) << VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
> +#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK             0x01f00000
> +#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT            20
> +#define VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT(x)                        (((x) << VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
> +#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK                   0x0e000000
> +#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT                  25
> +#define VIVS_HI_CHIP_SPECS_PIXEL_PIPES(x)                      (((x) << VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT) & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
> +#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK     0xf0000000
> +#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT    28
> +#define VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE(x)                (((x) << VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
> +
> +#define VIVS_HI_PROFILE_WRITE_BURSTS                           0x0000004c
> +
> +#define VIVS_HI_PROFILE_WRITE_REQUESTS                         0x00000050
> +
> +#define VIVS_HI_PROFILE_READ_BURSTS                            0x00000058
> +
> +#define VIVS_HI_PROFILE_READ_REQUESTS                          0x0000005c
> +
> +#define VIVS_HI_PROFILE_READ_LASTS                             0x00000060
> +
> +#define VIVS_HI_GP_OUT0                                                0x00000064
> +
> +#define VIVS_HI_GP_OUT1                                                0x00000068
> +
> +#define VIVS_HI_GP_OUT2                                                0x0000006c
> +
> +#define VIVS_HI_AXI_CONTROL                                    0x00000070
> +#define VIVS_HI_AXI_CONTROL_WR_FULL_BURST_MODE                 0x00000001
> +
> +#define VIVS_HI_CHIP_MINOR_FEATURE_1                           0x00000074
> +
> +#define VIVS_HI_PROFILE_TOTAL_CYCLES                           0x00000078
> +
> +#define VIVS_HI_PROFILE_IDLE_CYCLES                            0x0000007c
> +
> +#define VIVS_HI_CHIP_SPECS_2                                   0x00000080
> +#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK                 0x000000ff
> +#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT                        0
> +#define VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE(x)                    (((x) << VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT) & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
> +#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK           0x0000ff00
> +#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT          8
> +#define VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT(x)              (((x) << VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
> +#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK               0xffff0000
> +#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT              16
> +#define VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS(x)                  (((x) << VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT) & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
> +
> +#define VIVS_HI_CHIP_MINOR_FEATURE_2                           0x00000084
> +
> +#define VIVS_HI_CHIP_MINOR_FEATURE_3                           0x00000088
> +
> +#define VIVS_HI_CHIP_MINOR_FEATURE_4                           0x00000094
> +
> +#define VIVS_PM                                                        0x00000000
> +
> +#define VIVS_PM_POWER_CONTROLS                                 0x00000100
> +#define VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING      0x00000001
> +#define VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING       0x00000002
> +#define VIVS_PM_POWER_CONTROLS_DISABLE_STARVE_MODULE_CLOCK_GATING      0x00000004
> +#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK           0x000000f0
> +#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT          4
> +#define VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER(x)              (((x) << VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_ON_COUNTER__MASK)
> +#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK          0xffff0000
> +#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT         16
> +#define VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER(x)             (((x) << VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__SHIFT) & VIVS_PM_POWER_CONTROLS_TURN_OFF_COUNTER__MASK)
> +
> +#define VIVS_PM_MODULE_CONTROLS                                        0x00000104
> +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001
> +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002
> +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004
> +
> +#define VIVS_PM_MODULE_STATUS                                  0x00000108
> +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE            0x00000001
> +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE            0x00000002
> +#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE            0x00000004
> +
> +#define VIVS_PM_PULSE_EATER                                    0x0000010c
> +
> +#define VIVS_MMUv2                                             0x00000000
> +
> +#define VIVS_MMUv2_SAFE_ADDRESS                                        0x00000180
> +
> +#define VIVS_MMUv2_CONFIGURATION                               0x00000184
> +#define VIVS_MMUv2_CONFIGURATION_MODE__MASK                    0x00000001
> +#define VIVS_MMUv2_CONFIGURATION_MODE__SHIFT                   0
> +#define VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K                  0x00000000
> +#define VIVS_MMUv2_CONFIGURATION_MODE_MODE1_K                  0x00000001
> +#define VIVS_MMUv2_CONFIGURATION_MODE_MASK                     0x00000008
> +#define VIVS_MMUv2_CONFIGURATION_FLUSH__MASK                   0x00000010
> +#define VIVS_MMUv2_CONFIGURATION_FLUSH__SHIFT                  4
> +#define VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH                   0x00000010
> +#define VIVS_MMUv2_CONFIGURATION_FLUSH_MASK                    0x00000080
> +#define VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK                  0x00000100
> +#define VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK                 0xfffffc00
> +#define VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT                        10
> +#define VIVS_MMUv2_CONFIGURATION_ADDRESS(x)                    (((x) << VIVS_MMUv2_CONFIGURATION_ADDRESS__SHIFT) & VIVS_MMUv2_CONFIGURATION_ADDRESS__MASK)
> +
> +#define VIVS_MMUv2_STATUS                                      0x00000188
> +#define VIVS_MMUv2_STATUS_EXCEPTION0__MASK                     0x00000003
> +#define VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT                    0
> +#define VIVS_MMUv2_STATUS_EXCEPTION0(x)                                (((x) << VIVS_MMUv2_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK)
> +#define VIVS_MMUv2_STATUS_EXCEPTION1__MASK                     0x00000030
> +#define VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT                    4
> +#define VIVS_MMUv2_STATUS_EXCEPTION1(x)                                (((x) << VIVS_MMUv2_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION1__MASK)
> +#define VIVS_MMUv2_STATUS_EXCEPTION2__MASK                     0x00000300
> +#define VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT                    8
> +#define VIVS_MMUv2_STATUS_EXCEPTION2(x)                                (((x) << VIVS_MMUv2_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION2__MASK)
> +#define VIVS_MMUv2_STATUS_EXCEPTION3__MASK                     0x00003000
> +#define VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT                    12
> +#define VIVS_MMUv2_STATUS_EXCEPTION3(x)                                (((x) << VIVS_MMUv2_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_STATUS_EXCEPTION3__MASK)
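
The EXCEPTION0..3 fields above are 2-bit codes, one per MMU, holding the
MMU_EXCEPTION_* values from the top of this file. A decode sketch (the
register read helper is assumed):

    u32 status = readl(mmio + VIVS_MMUv2_STATUS);
    int i;

    for (i = 0; i < 4; i++) {
            u32 exc = (status >> (i * 4)) & 0x3;

            if (exc == MMU_EXCEPTION_PAGE_NOT_PRESENT) {
                    /* faulting address: VIVS_MMUv2_EXCEPTION_ADDR(i) */
            }
    }
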
> +
> +#define VIVS_MMUv2_CONTROL                                     0x0000018c
> +#define VIVS_MMUv2_CONTROL_ENABLE                              0x00000001
> +
> +#define VIVS_MMUv2_EXCEPTION_ADDR(i0)                         (0x00000190 + 0x4*(i0))
> +#define VIVS_MMUv2_EXCEPTION_ADDR__ESIZE                       0x00000004
> +#define VIVS_MMUv2_EXCEPTION_ADDR__LEN                         0x00000004
> +
> +#define VIVS_MC                                                        0x00000000
> +
> +#define VIVS_MC_MMU_FE_PAGE_TABLE                              0x00000400
> +
> +#define VIVS_MC_MMU_TX_PAGE_TABLE                              0x00000404
> +
> +#define VIVS_MC_MMU_PE_PAGE_TABLE                              0x00000408
> +
> +#define VIVS_MC_MMU_PEZ_PAGE_TABLE                             0x0000040c
> +
> +#define VIVS_MC_MMU_RA_PAGE_TABLE                              0x00000410
> +
> +#define VIVS_MC_DEBUG_MEMORY                                   0x00000414
> +#define VIVS_MC_DEBUG_MEMORY_SPECIAL_PATCH_GC320               0x00000008
> +#define VIVS_MC_DEBUG_MEMORY_FAST_CLEAR_BYPASS                 0x00100000
> +#define VIVS_MC_DEBUG_MEMORY_COMPRESSION_BYPASS                        0x00200000
> +
> +#define VIVS_MC_MEMORY_BASE_ADDR_RA                            0x00000418
> +
> +#define VIVS_MC_MEMORY_BASE_ADDR_FE                            0x0000041c
> +
> +#define VIVS_MC_MEMORY_BASE_ADDR_TX                            0x00000420
> +
> +#define VIVS_MC_MEMORY_BASE_ADDR_PEZ                           0x00000424
> +
> +#define VIVS_MC_MEMORY_BASE_ADDR_PE                            0x00000428
> +
> +#define VIVS_MC_MEMORY_TIMING_CONTROL                          0x0000042c
> +
> +#define VIVS_MC_MEMORY_FLUSH                                   0x00000430
> +
> +#define VIVS_MC_PROFILE_CYCLE_COUNTER                          0x00000438
> +
> +#define VIVS_MC_DEBUG_READ0                                    0x0000043c
> +
> +#define VIVS_MC_DEBUG_READ1                                    0x00000440
> +
> +#define VIVS_MC_DEBUG_WRITE                                    0x00000444
> +
> +#define VIVS_MC_PROFILE_RA_READ                                        0x00000448
> +
> +#define VIVS_MC_PROFILE_TX_READ                                        0x0000044c
> +
> +#define VIVS_MC_PROFILE_FE_READ                                        0x00000450
> +
> +#define VIVS_MC_PROFILE_PE_READ                                        0x00000454
> +
> +#define VIVS_MC_PROFILE_DE_READ                                        0x00000458
> +
> +#define VIVS_MC_PROFILE_SH_READ                                        0x0000045c
> +
> +#define VIVS_MC_PROFILE_PA_READ                                        0x00000460
> +
> +#define VIVS_MC_PROFILE_SE_READ                                        0x00000464
> +
> +#define VIVS_MC_PROFILE_MC_READ                                        0x00000468
> +
> +#define VIVS_MC_PROFILE_HI_READ                                        0x0000046c
> +
> +#define VIVS_MC_PROFILE_CONFIG0                                        0x00000470
> +#define VIVS_MC_PROFILE_CONFIG0_FE__MASK                       0x0000000f
> +#define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT                      0
> +#define VIVS_MC_PROFILE_CONFIG0_FE_RESET                       0x0000000f
> +#define VIVS_MC_PROFILE_CONFIG0_DE__MASK                       0x00000f00
> +#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT                      8
> +#define VIVS_MC_PROFILE_CONFIG0_DE_RESET                       0x00000f00
> +#define VIVS_MC_PROFILE_CONFIG0_PE__MASK                       0x000f0000
> +#define VIVS_MC_PROFILE_CONFIG0_PE__SHIFT                      16
> +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE    0x00000000
> +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE    0x00010000
> +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_COLOR_PIPE     0x00020000
> +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE     0x00030000
> +#define VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D          0x000b0000
> +#define VIVS_MC_PROFILE_CONFIG0_PE_RESET                       0x000f0000
> +#define VIVS_MC_PROFILE_CONFIG0_SH__MASK                       0x0f000000
> +#define VIVS_MC_PROFILE_CONFIG0_SH__SHIFT                      24
> +#define VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES               0x04000000
> +#define VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER             0x07000000
> +#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_PIXEL_COUNTER      0x08000000
> +#define VIVS_MC_PROFILE_CONFIG0_SH_VS_INST_COUNTER             0x09000000
> +#define VIVS_MC_PROFILE_CONFIG0_SH_RENDERED_VERTICE_COUNTER    0x0a000000
> +#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_BRANCH_INST_COUNTER     0x0b000000
> +#define VIVS_MC_PROFILE_CONFIG0_SH_VTX_TEXLD_INST_COUNTER      0x0c000000
> +#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_BRANCH_INST_COUNTER     0x0d000000
> +#define VIVS_MC_PROFILE_CONFIG0_SH_PXL_TEXLD_INST_COUNTER      0x0e000000
> +#define VIVS_MC_PROFILE_CONFIG0_SH_RESET                       0x0f000000
> +
> +#define VIVS_MC_PROFILE_CONFIG1                                        0x00000474
> +#define VIVS_MC_PROFILE_CONFIG1_PA__MASK                       0x0000000f
> +#define VIVS_MC_PROFILE_CONFIG1_PA__SHIFT                      0
> +#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER           0x00000003
> +#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER          0x00000004
> +#define VIVS_MC_PROFILE_CONFIG1_PA_OUTPUT_PRIM_COUNTER         0x00000005
> +#define VIVS_MC_PROFILE_CONFIG1_PA_DEPTH_CLIPPED_COUNTER       0x00000006
> +#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER    0x00000007
> +#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER              0x00000008
> +#define VIVS_MC_PROFILE_CONFIG1_PA_RESET                       0x0000000f
> +#define VIVS_MC_PROFILE_CONFIG1_SE__MASK                       0x00000f00
> +#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT                      8
> +#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT       0x00000000
> +#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT          0x00000100
> +#define VIVS_MC_PROFILE_CONFIG1_SE_RESET                       0x00000f00
> +#define VIVS_MC_PROFILE_CONFIG1_RA__MASK                       0x000f0000
> +#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT                      16
> +#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT           0x00000000
> +#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT            0x00010000
> +#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_QUAD_COUNT_AFTER_EARLY_Z      0x00020000
> +#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_PRIMITIVE_COUNT       0x00030000
> +#define VIVS_MC_PROFILE_CONFIG1_RA_PIPE_CACHE_MISS_COUNTER     0x00090000
> +#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000
> +#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT           0x000b0000
> +#define VIVS_MC_PROFILE_CONFIG1_RA_RESET                       0x000f0000
> +#define VIVS_MC_PROFILE_CONFIG1_TX__MASK                       0x0f000000
> +#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT                      24
> +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS     0x00000000
> +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS    0x01000000
> +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_DISCARDED_TEXTURE_REQUESTS    0x02000000
> +#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TEXTURE_REQUESTS      0x03000000
> +#define VIVS_MC_PROFILE_CONFIG1_TX_UNKNOWN                     0x04000000
> +#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_COUNT              0x05000000
> +#define VIVS_MC_PROFILE_CONFIG1_TX_MEM_READ_IN_8B_COUNT                0x06000000
> +#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_COUNT            0x07000000
> +#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_HIT_TEXEL_COUNT       0x08000000
> +#define VIVS_MC_PROFILE_CONFIG1_TX_CACHE_MISS_TEXEL_COUNT      0x09000000
> +#define VIVS_MC_PROFILE_CONFIG1_TX_RESET                       0x0f000000
> +
> +#define VIVS_MC_PROFILE_CONFIG2                                        0x00000478
> +#define VIVS_MC_PROFILE_CONFIG2_MC__MASK                       0x0000000f
> +#define VIVS_MC_PROFILE_CONFIG2_MC__SHIFT                      0
> +#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE     0x00000001
> +#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP   0x00000002
> +#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE    0x00000003
> +#define VIVS_MC_PROFILE_CONFIG2_MC_RESET                       0x0000000f
> +#define VIVS_MC_PROFILE_CONFIG2_HI__MASK                       0x00000f00
> +#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT                      8
> +#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED     0x00000000
> +#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED    0x00000100
> +#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED       0x00000200
> +#define VIVS_MC_PROFILE_CONFIG2_HI_RESET                       0x00000f00
> +
> +#define VIVS_MC_PROFILE_CONFIG3                                        0x0000047c
> +
> +#define VIVS_MC_BUS_CONFIG                                     0x00000480
> +#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK                 0x0000000f
> +#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT                        0
> +#define VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG(x)                    (((x) << VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK)
> +#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK                 0x000000f0
> +#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT                        4
> +#define VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG(x)                    (((x) << VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__SHIFT) & VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK)
> +
> +#define VIVS_MC_START_COMPOSITION                              0x00000554
> +
> +#define VIVS_MC_128B_MERGE                                     0x00000558
> +
> +
> +#endif /* STATE_HI_XML */
> diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
> new file mode 100644
> index 000000000000..5f1206b3f9ba
> --- /dev/null
> +++ b/include/uapi/drm/etnaviv_drm.h
> @@ -0,0 +1,215 @@
> +/*
> + * Copyright (C) 2015 Etnaviv Project
> + *
> + * This program is free software; you can redistribute it and/or modify it
> + * under the terms of the GNU General Public License version 2 as published by
> + * the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful, but WITHOUT
> + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
> + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
> + * more details.
> + *
> + * You should have received a copy of the GNU General Public License along with
> + * this program.  If not, see <http://www.gnu.org/licenses/>.
> + */
> +
> +#ifndef __ETNAVIV_DRM_H__
> +#define __ETNAVIV_DRM_H__
> +
> +#include <drm/drm.h>
> +
> +/* Please note that modifications to all structs defined here are
> + * subject to backwards-compatibility constraints:
> + *  1) Do not use pointers, use __u64 instead for 32 bit / 64 bit
> + *     user/kernel compatibility
> + *  2) Keep fields aligned to their size
> + *  3) Because of how drm_ioctl() works, we can add new fields at
> + *     the end of an ioctl if some care is taken: drm_ioctl() will
> + *     zero out the new fields at the tail of the ioctl, so a zero
> + *     value must have a backwards compatible meaning.  For output
> + *     params, older userspace simply won't see the newly added
> + *     fields, so any addition must be harmless when left unread.
> + */
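
Rule 1 above in practice: userspace smuggles pointers through __u64 so
the struct layout is identical for 32-bit and 64-bit processes, e.g.
(sketch, includes elided):

    req.bos = (__u64)(uintptr_t)bos_array;  /* never a raw pointer */
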
> +
> +/* timeouts are specified in clock-monotonic absolute times (to simplify
> + * restarting interrupted ioctls).  The following struct is logically the
> + * same as 'struct timespec' but 32/64b ABI safe.
> + */
> +struct drm_etnaviv_timespec {
> +       __s64 tv_sec;          /* seconds */
> +       __s64 tv_nsec;         /* nanoseconds */
> +};
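
Since the timeouts are absolute CLOCK_MONOTONIC values, callers have to
add "now" themselves. A helper userspace might use (illustrative only,
not part of this patch):

    #include <time.h>

    static void etna_abs_timeout(struct drm_etnaviv_timespec *t,
                                 unsigned int ms)
    {
            struct timespec now;

            clock_gettime(CLOCK_MONOTONIC, &now);
            t->tv_sec  = now.tv_sec + ms / 1000;
            t->tv_nsec = now.tv_nsec + (ms % 1000) * 1000000L;
            if (t->tv_nsec >= 1000000000L) {
                    t->tv_nsec -= 1000000000L;
                    t->tv_sec++;
            }
    }
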
> +
> +#define ETNAVIV_PARAM_GPU_MODEL                     0x01
> +#define ETNAVIV_PARAM_GPU_REVISION                  0x02
> +#define ETNAVIV_PARAM_GPU_FEATURES_0                0x03
> +#define ETNAVIV_PARAM_GPU_FEATURES_1                0x04
> +#define ETNAVIV_PARAM_GPU_FEATURES_2                0x05
> +#define ETNAVIV_PARAM_GPU_FEATURES_3                0x06
> +#define ETNAVIV_PARAM_GPU_FEATURES_4                0x07
> +
> +#define ETNAVIV_PARAM_GPU_STREAM_COUNT              0x10
> +#define ETNAVIV_PARAM_GPU_REGISTER_MAX              0x11
> +#define ETNAVIV_PARAM_GPU_THREAD_COUNT              0x12
> +#define ETNAVIV_PARAM_GPU_VERTEX_CACHE_SIZE         0x13
> +#define ETNAVIV_PARAM_GPU_SHADER_CORE_COUNT         0x14
> +#define ETNAVIV_PARAM_GPU_PIXEL_PIPES               0x15
> +#define ETNAVIV_PARAM_GPU_VERTEX_OUTPUT_BUFFER_SIZE 0x16
> +#define ETNAVIV_PARAM_GPU_BUFFER_SIZE               0x17
> +#define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT         0x18
> +#define ETNAVIV_PARAM_GPU_NUM_CONSTANTS             0x19
> +
> +#define ETNA_MAX_PIPES 4
> +
> +struct drm_etnaviv_param {
> +       __u32 pipe;           /* in */
> +       __u32 param;          /* in, ETNAVIV_PARAM_x */
> +       __u64 value;          /* out (get_param) or in (set_param) */
> +};
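
Typical use of GET_PARAM from userspace (sketch; 'fd' is an open DRM
device node, includes and error handling elided):

    struct drm_etnaviv_param req = {
            .pipe  = 0,                     /* first GPU pipe */
            .param = ETNAVIV_PARAM_GPU_MODEL,
    };

    ioctl(fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req);
    printf("GPU model 0x%llx\n", (unsigned long long)req.value);
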
> +
> +/*
> + * GEM buffers:
> + */
> +
> +#define ETNA_BO_CACHE_MASK   0x000f0000
> +/* cache modes */
> +#define ETNA_BO_CACHED       0x00010000
> +#define ETNA_BO_WC           0x00020000
> +#define ETNA_BO_UNCACHED     0x00040000
> +/* map flags */
> +#define ETNA_BO_FORCE_MMU    0x00100000
> +
> +struct drm_etnaviv_gem_new {
> +       __u64 size;           /* in */
> +       __u32 flags;          /* in, mask of ETNA_BO_x */
> +       __u32 handle;         /* out */
> +};
> +
> +struct drm_etnaviv_gem_info {
> +       __u32 handle;         /* in */
> +       __u32 pad;
> +       __u64 offset;         /* out, offset to pass to mmap() */
> +};
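
Allocation and mapping go together: GEM_NEW returns a handle, GEM_INFO
turns it into a fake mmap offset on the DRM fd. A sketch (error
handling elided):

    struct drm_etnaviv_gem_new new_req = {
            .size  = bo_size,
            .flags = ETNA_BO_WC,
    };
    struct drm_etnaviv_gem_info info_req = { 0 };
    void *map;

    ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_NEW, &new_req);
    info_req.handle = new_req.handle;
    ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_INFO, &info_req);
    map = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
               fd, info_req.offset);
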
> +
> +#define ETNA_PREP_READ        0x01
> +#define ETNA_PREP_WRITE       0x02
> +#define ETNA_PREP_NOSYNC      0x04
> +
> +struct drm_etnaviv_gem_cpu_prep {
> +       __u32 handle;         /* in */
> +       __u32 op;             /* in, mask of ETNA_PREP_x */
> +       struct drm_etnaviv_timespec timeout;   /* in */
> +};
> +
> +struct drm_etnaviv_gem_cpu_fini {
> +       __u32 handle;         /* in */
> +};
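
CPU access is bracketed by the two ioctls above; prep blocks (up to the
absolute timeout) until the GPU is done with the bo. A sketch reusing
the timeout helper and the mapping from the earlier examples ('vertices'
and 'len' are placeholders):

    struct drm_etnaviv_gem_cpu_prep prep = {
            .handle = new_req.handle,
            .op     = ETNA_PREP_WRITE,
    };
    struct drm_etnaviv_gem_cpu_fini fini = {
            .handle = new_req.handle,
    };

    etna_abs_timeout(&prep.timeout, 100);
    ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_PREP, &prep);
    memcpy(map, vertices, len);             /* CPU writes the bo */
    ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_CPU_FINI, &fini);
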
> +
> +/*
> + * Cmdstream Submission:
> + */
> +
> +/* The value written into the cmdstream is logically:
> + * relocbuf->gpuaddr + reloc_offset
> + *
> + * NOTE that relocs must be sorted by increasing submit_offset,
> + * otherwise the ioctl fails with EINVAL.
> + */
> +struct drm_etnaviv_gem_submit_reloc {
> +       __u32 submit_offset;  /* in, offset from submit_bo */
> +       __u32 reloc_idx;      /* in, index of reloc_bo buffer */
> +       __u64 reloc_offset;   /* in, offset from start of reloc_bo */
> +};
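
So the kernel-side patching rule, in one line (sketch; the bo_gpu_addr()
helper is assumed, 'stream' is taken to be the kernel's u32 view of the
cmdstream bo, and submit_offset a byte offset into it):

    stream[reloc->submit_offset / 4] =
            bo_gpu_addr(bos[reloc->reloc_idx]) + reloc->reloc_offset;
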
> +
> +/* Each buffer referenced elsewhere in the cmdstream submit (i.e. the
> + * cmdstream buffer(s) themselves or reloc entries) has one (and only
> + * one) entry in the submit->bos[] table.
> + *
> + * As an optimization, the buffer's current GPU virtual address can be
> + * passed back through the 'presumed' field.  If, on a subsequent
> + * reloc, userspace passes back a 'presumed' address that is still
> + * valid, patching the cmdstream for this entry is skipped.  This
> + * avoids the kernel having to map and access the cmdstream bo in the
> + * common case.
> + */
> +#define ETNA_SUBMIT_BO_READ             0x0001
> +#define ETNA_SUBMIT_BO_WRITE            0x0002
> +struct drm_etnaviv_gem_submit_bo {
> +       __u32 flags;          /* in, mask of ETNA_SUBMIT_BO_x */
> +       __u32 handle;         /* in, GEM handle */
> +       __u64 presumed;       /* in/out, presumed buffer address */
> +};
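
Inside the kernel's per-bo loop, the fast path described above then
reduces to (sketch, names assumed):

    if (submit_bo->presumed == bo_gpu_addr(bo))
            continue;                       /* mapping unchanged, skip patching */

    submit_bo->presumed = bo_gpu_addr(bo);  /* report back to userspace */
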
> +
> +/* Each cmdstream submit consists of a table of buffers involved, and
> + * one or more cmdstream buffers.  This allows for conditional execution
> + * (context-restore), and IB buffers needed for per tile/bin draw cmds.
> + */
> +#define ETNA_PIPE_3D      0x00
> +#define ETNA_PIPE_2D      0x01
> +#define ETNA_PIPE_VG      0x02
> +struct drm_etnaviv_gem_submit {
> +       __u32 fence;          /* out */
> +       __u32 pipe;           /* in */
> +       __u32 exec_state;     /* in, initial execution state (ETNA_PIPE_x) */
> +       __u32 nr_bos;         /* in, number of submit_bo's */
> +       __u32 nr_relocs;      /* in, number of submit_reloc's */
> +       __u32 stream_size;    /* in, cmdstream size */
> +       __u64 bos;            /* in, ptr to array of submit_bo's */
> +       __u64 relocs;         /* in, ptr to array of submit_reloc's */
> +       __u64 stream;         /* in, ptr to cmdstream */
> +};
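
Putting the pieces together, a minimal submit from userspace (one bo,
no relocs; 'stream' and 'stream_len' are placeholders, error handling
elided, and .pipe is assumed to address the GPU core):

    struct drm_etnaviv_gem_submit_bo bo = {
            .flags  = ETNA_SUBMIT_BO_READ,
            .handle = new_req.handle,
    };
    struct drm_etnaviv_gem_submit submit = {
            .pipe        = 0,
            .exec_state  = ETNA_PIPE_2D,
            .nr_bos      = 1,
            .stream_size = stream_len,
            .bos         = (__u64)(uintptr_t)&bo,
            .stream      = (__u64)(uintptr_t)stream,
    };

    ioctl(fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, &submit);
    /* submit.fence is now the fence number to wait on */
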
> +
> +/* The normal way to synchronize with the GPU is just to CPU_PREP on
> + * a buffer if you need to access it from the CPU (other cmdstream
> + * submissions from the same or other contexts, the PAGE_FLIP ioctl, etc.,
> + * all handle the required synchronization under the hood).  This ioctl
> + * mainly just exists as a way to implement the gallium pipe_fence
> + * APIs without requiring a dummy bo to synchronize on.
> + */
> +struct drm_etnaviv_wait_fence {
> +       __u32 pipe;           /* in */
> +       __u32 fence;          /* in */
> +       struct drm_etnaviv_timespec timeout;   /* in */
> +};
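
Which pairs with the submit above as (sketch):

    struct drm_etnaviv_wait_fence wait = {
            .pipe  = 0,
            .fence = submit.fence,
    };

    etna_abs_timeout(&wait.timeout, 1000);
    ioctl(fd, DRM_IOCTL_ETNAVIV_WAIT_FENCE, &wait);
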
> +
> +#define ETNA_USERPTR_READ      0x01
> +#define ETNA_USERPTR_WRITE     0x02
> +struct drm_etnaviv_gem_userptr {
> +       __u64 user_ptr;       /* in, page aligned user pointer */
> +       __u64 user_size;      /* in, page aligned user size */
> +       __u32 flags;          /* in, mask of ETNA_USERPTR_x */
> +       __u32 handle;         /* out, non-zero handle */
> +};
> +
> +struct drm_etnaviv_gem_wait {
> +       __u32 pipe;                             /* in */
> +       __u32 handle;                           /* in, bo to be waited for */
> +       struct drm_etnaviv_timespec timeout;    /* in */
> +};
> +
> +#define DRM_ETNAVIV_GET_PARAM          0x00
> +/* placeholder:
> +#define DRM_ETNAVIV_SET_PARAM          0x01
> + */
> +#define DRM_ETNAVIV_GEM_NEW            0x02
> +#define DRM_ETNAVIV_GEM_INFO           0x03
> +#define DRM_ETNAVIV_GEM_CPU_PREP       0x04
> +#define DRM_ETNAVIV_GEM_CPU_FINI       0x05
> +#define DRM_ETNAVIV_GEM_SUBMIT         0x06
> +#define DRM_ETNAVIV_WAIT_FENCE         0x07
> +#define DRM_ETNAVIV_GEM_USERPTR        0x08
> +#define DRM_ETNAVIV_GEM_WAIT           0x09
> +#define DRM_ETNAVIV_NUM_IOCTLS         0x0a
> +
> +#define DRM_IOCTL_ETNAVIV_GET_PARAM    DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GET_PARAM, struct drm_etnaviv_param)
> +#define DRM_IOCTL_ETNAVIV_GEM_NEW      DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_NEW, struct drm_etnaviv_gem_new)
> +#define DRM_IOCTL_ETNAVIV_GEM_INFO     DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_INFO, struct drm_etnaviv_gem_info)
> +#define DRM_IOCTL_ETNAVIV_GEM_CPU_PREP DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_PREP, struct drm_etnaviv_gem_cpu_prep)
> +#define DRM_IOCTL_ETNAVIV_GEM_CPU_FINI DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_CPU_FINI, struct drm_etnaviv_gem_cpu_fini)
> +#define DRM_IOCTL_ETNAVIV_GEM_SUBMIT   DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_SUBMIT, struct drm_etnaviv_gem_submit)
> +#define DRM_IOCTL_ETNAVIV_WAIT_FENCE   DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_WAIT_FENCE, struct drm_etnaviv_wait_fence)
> +#define DRM_IOCTL_ETNAVIV_GEM_USERPTR  DRM_IOWR(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_USERPTR, struct drm_etnaviv_gem_userptr)
> +#define DRM_IOCTL_ETNAVIV_GEM_WAIT     DRM_IOW(DRM_COMMAND_BASE + DRM_ETNAVIV_GEM_WAIT, struct drm_etnaviv_gem_wait)
> +
> +#endif /* __ETNAVIV_DRM_H__ */
> --
> 2.5.0
_______________________________________________
dri-devel mailing list
dri-devel@xxxxxxxxxxxxxxxxxxxxx
http://lists.freedesktop.org/mailman/listinfo/dri-devel