Initial commit
This commit is contained in:
commit
169c65d57e
51358 changed files with 23120455 additions and 0 deletions
22
drivers/gpu/drm/vmwgfx/Kconfig
Normal file
22
drivers/gpu/drm/vmwgfx/Kconfig
Normal file
|
@ -0,0 +1,22 @@
|
|||
config DRM_VMWGFX
|
||||
tristate "DRM driver for VMware Virtual GPU"
|
||||
depends on DRM && PCI && FB
|
||||
select FB_DEFERRED_IO
|
||||
select FB_CFB_FILLRECT
|
||||
select FB_CFB_COPYAREA
|
||||
select FB_CFB_IMAGEBLIT
|
||||
select DRM_TTM
|
||||
help
|
||||
Choose this option if you would like to run 3D acceleration
|
||||
in a VMware virtual machine.
|
||||
This is a KMS enabled DRM driver for the VMware SVGA2
|
||||
virtual hardware.
|
||||
The compiled module will be called "vmwgfx.ko".
|
||||
|
||||
config DRM_VMWGFX_FBCON
|
||||
depends on DRM_VMWGFX
|
||||
bool "Enable framebuffer console under vmwgfx by default"
|
||||
help
|
||||
Choose this option if you are shipping a new vmwgfx
|
||||
userspace driver that supports using the kernel driver.
|
||||
|
11
drivers/gpu/drm/vmwgfx/Makefile
Normal file
11
drivers/gpu/drm/vmwgfx/Makefile
Normal file
|
@ -0,0 +1,11 @@
|
|||
|
||||
ccflags-y := -Iinclude/drm
|
||||
|
||||
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
|
||||
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
|
||||
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
|
||||
vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
|
||||
vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
|
||||
vmwgfx_surface.o
|
||||
|
||||
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
|
1896
drivers/gpu/drm/vmwgfx/svga3d_reg.h
Normal file
1896
drivers/gpu/drm/vmwgfx/svga3d_reg.h
Normal file
File diff suppressed because it is too large
Load diff
909
drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
Normal file
909
drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
Normal file
|
@ -0,0 +1,909 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2008-2012 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#include <drm/vmwgfx_drm.h>
|
||||
#define surf_size_struct struct drm_vmw_size
|
||||
|
||||
#else /* __KERNEL__ */
|
||||
|
||||
#ifndef ARRAY_SIZE
|
||||
#define ARRAY_SIZE(_A) (sizeof(_A) / sizeof((_A)[0]))
|
||||
#endif /* ARRAY_SIZE */
|
||||
|
||||
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
|
||||
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
|
||||
#define surf_size_struct SVGA3dSize
|
||||
#define u32 uint32
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#include "svga3d_reg.h"
|
||||
|
||||
/*
|
||||
* enum svga3d_block_desc describes the active data channels in a block.
|
||||
*
|
||||
* There can be at-most four active channels in a block:
|
||||
* 1. Red, bump W, luminance and depth are stored in the first channel.
|
||||
* 2. Green, bump V and stencil are stored in the second channel.
|
||||
* 3. Blue and bump U are stored in the third channel.
|
||||
* 4. Alpha and bump Q are stored in the fourth channel.
|
||||
*
|
||||
* Block channels can be used to store compressed and buffer data:
|
||||
* 1. For compressed formats, only the data channel is used and its size
|
||||
* is equal to that of a singular block in the compression scheme.
|
||||
* 2. For buffer formats, only the data channel is used and its size is
|
||||
* exactly one byte in length.
|
||||
* 3. In each case the bit depth represent the size of a singular block.
|
||||
*
|
||||
* Note: Compressed and IEEE formats do not use the bitMask structure.
|
||||
*/
|
||||
|
||||
enum svga3d_block_desc {
|
||||
SVGA3DBLOCKDESC_NONE = 0, /* No channels are active */
|
||||
SVGA3DBLOCKDESC_BLUE = 1 << 0, /* Block with red channel
|
||||
data */
|
||||
SVGA3DBLOCKDESC_U = 1 << 0, /* Block with bump U channel
|
||||
data */
|
||||
SVGA3DBLOCKDESC_UV_VIDEO = 1 << 7, /* Block with alternating video
|
||||
U and V */
|
||||
SVGA3DBLOCKDESC_GREEN = 1 << 1, /* Block with green channel
|
||||
data */
|
||||
SVGA3DBLOCKDESC_V = 1 << 1, /* Block with bump V channel
|
||||
data */
|
||||
SVGA3DBLOCKDESC_STENCIL = 1 << 1, /* Block with a stencil
|
||||
channel */
|
||||
SVGA3DBLOCKDESC_RED = 1 << 2, /* Block with blue channel
|
||||
data */
|
||||
SVGA3DBLOCKDESC_W = 1 << 2, /* Block with bump W channel
|
||||
data */
|
||||
SVGA3DBLOCKDESC_LUMINANCE = 1 << 2, /* Block with luminance channel
|
||||
data */
|
||||
SVGA3DBLOCKDESC_Y = 1 << 2, /* Block with video luminance
|
||||
data */
|
||||
SVGA3DBLOCKDESC_DEPTH = 1 << 2, /* Block with depth channel */
|
||||
SVGA3DBLOCKDESC_ALPHA = 1 << 3, /* Block with an alpha
|
||||
channel */
|
||||
SVGA3DBLOCKDESC_Q = 1 << 3, /* Block with bump Q channel
|
||||
data */
|
||||
SVGA3DBLOCKDESC_BUFFER = 1 << 4, /* Block stores 1 byte of
|
||||
data */
|
||||
SVGA3DBLOCKDESC_COMPRESSED = 1 << 5, /* Block stores n bytes of
|
||||
data depending on the
|
||||
compression method used */
|
||||
SVGA3DBLOCKDESC_IEEE_FP = 1 << 6, /* Block stores data in an IEEE
|
||||
floating point
|
||||
representation in
|
||||
all channels */
|
||||
SVGA3DBLOCKDESC_PLANAR_YUV = 1 << 8, /* Three separate blocks store
|
||||
data. */
|
||||
SVGA3DBLOCKDESC_U_VIDEO = 1 << 9, /* Block with U video data */
|
||||
SVGA3DBLOCKDESC_V_VIDEO = 1 << 10, /* Block with V video data */
|
||||
SVGA3DBLOCKDESC_EXP = 1 << 11, /* Shared exponent */
|
||||
SVGA3DBLOCKDESC_SRGB = 1 << 12, /* Data is in sRGB format */
|
||||
SVGA3DBLOCKDESC_2PLANAR_YUV = 1 << 13, /* 2 planes of Y, UV,
|
||||
e.g., NV12. */
|
||||
SVGA3DBLOCKDESC_3PLANAR_YUV = 1 << 14, /* 3 planes of separate
|
||||
Y, U, V, e.g., YV12. */
|
||||
|
||||
SVGA3DBLOCKDESC_RG = SVGA3DBLOCKDESC_RED |
|
||||
SVGA3DBLOCKDESC_GREEN,
|
||||
SVGA3DBLOCKDESC_RGB = SVGA3DBLOCKDESC_RG |
|
||||
SVGA3DBLOCKDESC_BLUE,
|
||||
SVGA3DBLOCKDESC_RGB_SRGB = SVGA3DBLOCKDESC_RGB |
|
||||
SVGA3DBLOCKDESC_SRGB,
|
||||
SVGA3DBLOCKDESC_RGBA = SVGA3DBLOCKDESC_RGB |
|
||||
SVGA3DBLOCKDESC_ALPHA,
|
||||
SVGA3DBLOCKDESC_RGBA_SRGB = SVGA3DBLOCKDESC_RGBA |
|
||||
SVGA3DBLOCKDESC_SRGB,
|
||||
SVGA3DBLOCKDESC_UV = SVGA3DBLOCKDESC_U |
|
||||
SVGA3DBLOCKDESC_V,
|
||||
SVGA3DBLOCKDESC_UVL = SVGA3DBLOCKDESC_UV |
|
||||
SVGA3DBLOCKDESC_LUMINANCE,
|
||||
SVGA3DBLOCKDESC_UVW = SVGA3DBLOCKDESC_UV |
|
||||
SVGA3DBLOCKDESC_W,
|
||||
SVGA3DBLOCKDESC_UVWA = SVGA3DBLOCKDESC_UVW |
|
||||
SVGA3DBLOCKDESC_ALPHA,
|
||||
SVGA3DBLOCKDESC_UVWQ = SVGA3DBLOCKDESC_U |
|
||||
SVGA3DBLOCKDESC_V |
|
||||
SVGA3DBLOCKDESC_W |
|
||||
SVGA3DBLOCKDESC_Q,
|
||||
SVGA3DBLOCKDESC_LA = SVGA3DBLOCKDESC_LUMINANCE |
|
||||
SVGA3DBLOCKDESC_ALPHA,
|
||||
SVGA3DBLOCKDESC_R_FP = SVGA3DBLOCKDESC_RED |
|
||||
SVGA3DBLOCKDESC_IEEE_FP,
|
||||
SVGA3DBLOCKDESC_RG_FP = SVGA3DBLOCKDESC_R_FP |
|
||||
SVGA3DBLOCKDESC_GREEN,
|
||||
SVGA3DBLOCKDESC_RGB_FP = SVGA3DBLOCKDESC_RG_FP |
|
||||
SVGA3DBLOCKDESC_BLUE,
|
||||
SVGA3DBLOCKDESC_RGBA_FP = SVGA3DBLOCKDESC_RGB_FP |
|
||||
SVGA3DBLOCKDESC_ALPHA,
|
||||
SVGA3DBLOCKDESC_DS = SVGA3DBLOCKDESC_DEPTH |
|
||||
SVGA3DBLOCKDESC_STENCIL,
|
||||
SVGA3DBLOCKDESC_YUV = SVGA3DBLOCKDESC_UV_VIDEO |
|
||||
SVGA3DBLOCKDESC_Y,
|
||||
SVGA3DBLOCKDESC_AYUV = SVGA3DBLOCKDESC_ALPHA |
|
||||
SVGA3DBLOCKDESC_Y |
|
||||
SVGA3DBLOCKDESC_U_VIDEO |
|
||||
SVGA3DBLOCKDESC_V_VIDEO,
|
||||
SVGA3DBLOCKDESC_RGBE = SVGA3DBLOCKDESC_RGB |
|
||||
SVGA3DBLOCKDESC_EXP,
|
||||
SVGA3DBLOCKDESC_COMPRESSED_SRGB = SVGA3DBLOCKDESC_COMPRESSED |
|
||||
SVGA3DBLOCKDESC_SRGB,
|
||||
SVGA3DBLOCKDESC_NV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
|
||||
SVGA3DBLOCKDESC_2PLANAR_YUV,
|
||||
SVGA3DBLOCKDESC_YV12 = SVGA3DBLOCKDESC_PLANAR_YUV |
|
||||
SVGA3DBLOCKDESC_3PLANAR_YUV,
|
||||
};
|
||||
|
||||
/*
|
||||
* SVGA3dSurfaceDesc describes the actual pixel data.
|
||||
*
|
||||
* This structure provides the following information:
|
||||
* 1. Block description.
|
||||
* 2. Dimensions of a block in the surface.
|
||||
* 3. Size of block in bytes.
|
||||
* 4. Bit depth of the pixel data.
|
||||
* 5. Channel bit depths and masks (if applicable).
|
||||
*/
|
||||
#define SVGA3D_CHANNEL_DEF(type) \
|
||||
struct { \
|
||||
union { \
|
||||
type blue; \
|
||||
type u; \
|
||||
type uv_video; \
|
||||
type u_video; \
|
||||
}; \
|
||||
union { \
|
||||
type green; \
|
||||
type v; \
|
||||
type stencil; \
|
||||
type v_video; \
|
||||
}; \
|
||||
union { \
|
||||
type red; \
|
||||
type w; \
|
||||
type luminance; \
|
||||
type y; \
|
||||
type depth; \
|
||||
type data; \
|
||||
}; \
|
||||
union { \
|
||||
type alpha; \
|
||||
type q; \
|
||||
type exp; \
|
||||
}; \
|
||||
}
|
||||
|
||||
struct svga3d_surface_desc {
|
||||
enum svga3d_block_desc block_desc;
|
||||
surf_size_struct block_size;
|
||||
u32 bytes_per_block;
|
||||
u32 pitch_bytes_per_block;
|
||||
|
||||
struct {
|
||||
u32 total;
|
||||
SVGA3D_CHANNEL_DEF(uint8);
|
||||
} bit_depth;
|
||||
|
||||
struct {
|
||||
SVGA3D_CHANNEL_DEF(uint8);
|
||||
} bit_offset;
|
||||
};
|
||||
|
||||
static const struct svga3d_surface_desc svga3d_surface_descs[] = {
|
||||
{SVGA3DBLOCKDESC_NONE,
|
||||
{1, 1, 1}, 0, 0, {0, {{0}, {0}, {0}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_FORMAT_INVALID */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGB,
|
||||
{1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_X8R8G8B8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_A8R8G8B8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGB,
|
||||
{1, 1, 1}, 2, 2, {16, {{5}, {6}, {5}, {0} } },
|
||||
{{{0}, {5}, {11}, {0} } } }, /* SVGA3D_R5G6B5 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGB,
|
||||
{1, 1, 1}, 2, 2, {15, {{5}, {5}, {5}, {0} } },
|
||||
{{{0}, {5}, {10}, {0} } } }, /* SVGA3D_X1R5G5B5 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 2, 2, {16, {{5}, {5}, {5}, {1} } },
|
||||
{{{0}, {5}, {10}, {15} } } }, /* SVGA3D_A1R5G5B5 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 2, 2, {16, {{4}, {4}, {4}, {4} } },
|
||||
{{{0}, {4}, {8}, {12} } } }, /* SVGA3D_A4R4G4B4 */
|
||||
|
||||
{SVGA3DBLOCKDESC_DEPTH,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D32 */
|
||||
|
||||
{SVGA3DBLOCKDESC_DEPTH,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_D16 */
|
||||
|
||||
{SVGA3DBLOCKDESC_DS,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_DS,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {1}, {15}, {0} } },
|
||||
{{{0}, {15}, {0}, {0} } } }, /* SVGA3D_Z_D15S1 */
|
||||
|
||||
{SVGA3DBLOCKDESC_LUMINANCE,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_LA,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {4}, {4} } },
|
||||
{{{0}, {0}, {0}, {4} } } }, /* SVGA3D_LUMINANCE4_ALPHA4 */
|
||||
|
||||
{SVGA3DBLOCKDESC_LUMINANCE,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_LUMINANCE16 */
|
||||
|
||||
{SVGA3DBLOCKDESC_LA,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
|
||||
{{{0}, {0}, {0}, {8} } } }, /* SVGA3D_LUMINANCE8_ALPHA8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT1 */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT2 */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT3 */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT4 */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_DXT5 */
|
||||
|
||||
{SVGA3DBLOCKDESC_UV,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {8}, {8} } },
|
||||
{{{0}, {0}, {0}, {8} } } }, /* SVGA3D_BUMPU8V8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_UVL,
|
||||
{1, 1, 1}, 2, 2, {16, {{5}, {5}, {6}, {0} } },
|
||||
{{{11}, {6}, {0}, {0} } } }, /* SVGA3D_BUMPL6V5U5 */
|
||||
|
||||
{SVGA3DBLOCKDESC_UVL,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {0} } },
|
||||
{{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPX8L8V8U8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_UVL,
|
||||
{1, 1, 1}, 3, 3, {24, {{8}, {8}, {8}, {0} } },
|
||||
{{{16}, {8}, {0}, {0} } } }, /* SVGA3D_BUMPL8V8U8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA_FP,
|
||||
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
|
||||
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_ARGB_S10E5 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA_FP,
|
||||
{1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
|
||||
{{{64}, {32}, {0}, {96} } } }, /* SVGA3D_ARGB_S23E8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
|
||||
{{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2R10G10B10 */
|
||||
|
||||
{SVGA3DBLOCKDESC_UV,
|
||||
{1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
|
||||
{{{8}, {0}, {0}, {0} } } }, /* SVGA3D_V8U8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_UVWQ,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{24}, {16}, {8}, {0} } } }, /* SVGA3D_Q8W8V8U8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_UV,
|
||||
{1, 1, 1}, 2, 2, {16, {{8}, {8}, {0}, {0} } },
|
||||
{{{8}, {0}, {0}, {0} } } }, /* SVGA3D_CxV8U8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_UVL,
|
||||
{1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
|
||||
{{{16}, {8}, {0}, {0} } } }, /* SVGA3D_X8L8V8U8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_UVWA,
|
||||
{1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
|
||||
{{{0}, {10}, {20}, {30} } } }, /* SVGA3D_A2W10V10U10 */
|
||||
|
||||
{SVGA3DBLOCKDESC_ALPHA,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {0}, {8} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_ALPHA8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_R_FP,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S10E5 */
|
||||
|
||||
{SVGA3DBLOCKDESC_R_FP,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R_S23E8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG_FP,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
|
||||
{{{0}, {16}, {0}, {0} } } }, /* SVGA3D_RG_S10E5 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG_FP,
|
||||
{1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
|
||||
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_RG_S23E8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_BUFFER,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BUFFER */
|
||||
|
||||
{SVGA3DBLOCKDESC_DEPTH,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24X8 */
|
||||
|
||||
{SVGA3DBLOCKDESC_UV,
|
||||
{1, 1, 1}, 4, 4, {32, {{16}, {16}, {0}, {0} } },
|
||||
{{{16}, {0}, {0}, {0} } } }, /* SVGA3D_V16U16 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
|
||||
{{{0}, {0}, {16}, {0} } } }, /* SVGA3D_G16R16 */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
|
||||
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_A16B16G16R16 */
|
||||
|
||||
{SVGA3DBLOCKDESC_YUV,
|
||||
{1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {8}, {0} } } }, /* SVGA3D_UYVY */
|
||||
|
||||
{SVGA3DBLOCKDESC_YUV,
|
||||
{1, 1, 1}, 2, 2, {16, {{8}, {0}, {8}, {0} } },
|
||||
{{{8}, {0}, {0}, {0} } } }, /* SVGA3D_YUY2 */
|
||||
|
||||
{SVGA3DBLOCKDESC_NV12,
|
||||
{2, 2, 1}, 6, 2, {48, {{0}, {0}, {48}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_NV12 */
|
||||
|
||||
{SVGA3DBLOCKDESC_AYUV,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_AYUV */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
|
||||
{{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
|
||||
{{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_UVWQ,
|
||||
{1, 1, 1}, 16, 16, {128, {{32}, {32}, {32}, {32} } },
|
||||
{{{64}, {32}, {0}, {96} } } }, /* SVGA3D_R32G32B32A32_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGB,
|
||||
{1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
|
||||
{{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGB_FP,
|
||||
{1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
|
||||
{{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_FLOAT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGB,
|
||||
{1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
|
||||
{{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_UVW,
|
||||
{1, 1, 1}, 12, 12, {96, {{32}, {32}, {32}, {0} } },
|
||||
{{{64}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32B32_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
|
||||
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
|
||||
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_UVWQ,
|
||||
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
|
||||
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_UVWQ,
|
||||
{1, 1, 1}, 8, 8, {64, {{16}, {16}, {16}, {16} } },
|
||||
{{{32}, {16}, {0}, {48} } } }, /* SVGA3D_R16G16B16A16_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
|
||||
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
|
||||
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_UV,
|
||||
{1, 1, 1}, 8, 8, {64, {{0}, {32}, {32}, {0} } },
|
||||
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G32_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
|
||||
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_R32G8X24_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_DS,
|
||||
{1, 1, 1}, 8, 8, {64, {{0}, {8}, {32}, {0} } },
|
||||
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT_S8X24_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_R_FP,
|
||||
{1, 1, 1}, 8, 8, {64, {{0}, {0}, {32}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_FLOAT_X8_X24_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_GREEN,
|
||||
{1, 1, 1}, 8, 8, {64, {{0}, {8}, {0}, {0} } },
|
||||
{{{0}, {32}, {0}, {0} } } }, /* SVGA3D_X32_TYPELESS_G8X24_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
|
||||
{{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
|
||||
{{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10A2_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGB_FP,
|
||||
{1, 1, 1}, 4, 4, {32, {{10}, {11}, {11}, {0} } },
|
||||
{{{0}, {10}, {21}, {0} } } }, /* SVGA3D_R11G11B10_FLOAT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA_SRGB,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UNORM_SRGB */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{16}, {8}, {0}, {24} } } }, /* SVGA3D_R8G8B8A8_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
|
||||
{{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG_FP,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
|
||||
{{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_UV,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {16}, {16}, {0} } },
|
||||
{{{0}, {16}, {0}, {0} } } }, /* SVGA3D_R16G16_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_DEPTH,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_D32_FLOAT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {32}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R32_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_R24G8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_DS,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_D24_UNORM_S8_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {0}, {24}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R24_UNORM_X8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_GREEN,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {0}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_X24_TYPELESS_G8_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_UV,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_U,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_U,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R16_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_UINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_U,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_U,
|
||||
{1, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R8_SINT */
|
||||
|
||||
{SVGA3DBLOCKDESC_RED,
|
||||
{8, 1, 1}, 1, 1, {8, {{0}, {0}, {8}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_R1_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBE,
|
||||
{1, 1, 1}, 4, 4, {32, {{9}, {9}, {9}, {5} } },
|
||||
{{{18}, {9}, {0}, {27} } } }, /* SVGA3D_R9G9B9E5_SHAREDEXP */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_R8G8_B8G8_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RG,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {0}, {0} } } }, /* SVGA3D_G8R8_G8B8_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
|
||||
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC1_UNORM_SRGB */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC2_UNORM_SRGB */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED_SRGB,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC3_UNORM_SRGB */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 8, 8, {64, {{0}, {0}, {64}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC4_SNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_COMPRESSED,
|
||||
{4, 4, 1}, 16, 16, {128, {{0}, {0}, {128}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_BC5_SNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{10}, {10}, {10}, {2} } },
|
||||
{{{0}, {10}, {20}, {30} } } }, /* SVGA3D_R10G10B10_XR_BIAS_A2_UNORM */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGBA_SRGB,
|
||||
{1, 1, 1}, 4, 4, {32, {{8}, {8}, {8}, {8} } },
|
||||
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8A8_UNORM_SRGB */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGB,
|
||||
{1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_TYPELESS */
|
||||
|
||||
{SVGA3DBLOCKDESC_RGB_SRGB,
|
||||
{1, 1, 1}, 4, 4, {24, {{8}, {8}, {8}, {0} } },
|
||||
{{{0}, {8}, {16}, {24} } } }, /* SVGA3D_B8G8R8X8_UNORM_SRGB */
|
||||
|
||||
{SVGA3DBLOCKDESC_DEPTH,
|
||||
{1, 1, 1}, 2, 2, {16, {{0}, {0}, {16}, {0} } },
|
||||
{{{0}, {0}, {0}, {0} } } }, /* SVGA3D_Z_DF16 */
|
||||
|
||||
{SVGA3DBLOCKDESC_DS,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_DF24 */
|
||||
|
||||
{SVGA3DBLOCKDESC_DS,
|
||||
{1, 1, 1}, 4, 4, {32, {{0}, {8}, {24}, {0} } },
|
||||
{{{0}, {24}, {0}, {0} } } }, /* SVGA3D_Z_D24S8_INT */
|
||||
};
|
||||
|
||||
static inline u32 clamped_umul32(u32 a, u32 b)
|
||||
{
|
||||
uint64_t tmp = (uint64_t) a*b;
|
||||
return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
|
||||
}
|
||||
|
||||
static inline const struct svga3d_surface_desc *
|
||||
svga3dsurface_get_desc(SVGA3dSurfaceFormat format)
|
||||
{
|
||||
if (format < ARRAY_SIZE(svga3d_surface_descs))
|
||||
return &svga3d_surface_descs[format];
|
||||
|
||||
return &svga3d_surface_descs[SVGA3D_FORMAT_INVALID];
|
||||
}
|
||||
|
||||
/*
|
||||
*----------------------------------------------------------------------
|
||||
*
|
||||
* svga3dsurface_get_mip_size --
|
||||
*
|
||||
* Given a base level size and the mip level, compute the size of
|
||||
* the mip level.
|
||||
*
|
||||
* Results:
|
||||
* See above.
|
||||
*
|
||||
* Side effects:
|
||||
* None.
|
||||
*
|
||||
*----------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
static inline surf_size_struct
|
||||
svga3dsurface_get_mip_size(surf_size_struct base_level, u32 mip_level)
|
||||
{
|
||||
surf_size_struct size;
|
||||
|
||||
size.width = max_t(u32, base_level.width >> mip_level, 1);
|
||||
size.height = max_t(u32, base_level.height >> mip_level, 1);
|
||||
size.depth = max_t(u32, base_level.depth >> mip_level, 1);
|
||||
return size;
|
||||
}
|
||||
|
||||
static inline void
|
||||
svga3dsurface_get_size_in_blocks(const struct svga3d_surface_desc *desc,
|
||||
const surf_size_struct *pixel_size,
|
||||
surf_size_struct *block_size)
|
||||
{
|
||||
block_size->width = DIV_ROUND_UP(pixel_size->width,
|
||||
desc->block_size.width);
|
||||
block_size->height = DIV_ROUND_UP(pixel_size->height,
|
||||
desc->block_size.height);
|
||||
block_size->depth = DIV_ROUND_UP(pixel_size->depth,
|
||||
desc->block_size.depth);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
svga3dsurface_is_planar_surface(const struct svga3d_surface_desc *desc)
|
||||
{
|
||||
return (desc->block_desc & SVGA3DBLOCKDESC_PLANAR_YUV) != 0;
|
||||
}
|
||||
|
||||
static inline u32
|
||||
svga3dsurface_calculate_pitch(const struct svga3d_surface_desc *desc,
|
||||
const surf_size_struct *size)
|
||||
{
|
||||
u32 pitch;
|
||||
surf_size_struct blocks;
|
||||
|
||||
svga3dsurface_get_size_in_blocks(desc, size, &blocks);
|
||||
|
||||
pitch = blocks.width * desc->pitch_bytes_per_block;
|
||||
|
||||
return pitch;
|
||||
}
|
||||
|
||||
/*
|
||||
*-----------------------------------------------------------------------------
|
||||
*
|
||||
* svga3dsurface_get_image_buffer_size --
|
||||
*
|
||||
* Return the number of bytes of buffer space required to store
|
||||
* one image of a surface, optionally using the specified pitch.
|
||||
*
|
||||
* If pitch is zero, it is assumed that rows are tightly packed.
|
||||
*
|
||||
* This function is overflow-safe. If the result would have
|
||||
* overflowed, instead we return MAX_UINT32.
|
||||
*
|
||||
* Results:
|
||||
* Byte count.
|
||||
*
|
||||
* Side effects:
|
||||
* None.
|
||||
*
|
||||
*-----------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
static inline u32
|
||||
svga3dsurface_get_image_buffer_size(const struct svga3d_surface_desc *desc,
|
||||
const surf_size_struct *size,
|
||||
u32 pitch)
|
||||
{
|
||||
surf_size_struct image_blocks;
|
||||
u32 slice_size, total_size;
|
||||
|
||||
svga3dsurface_get_size_in_blocks(desc, size, &image_blocks);
|
||||
|
||||
if (svga3dsurface_is_planar_surface(desc)) {
|
||||
total_size = clamped_umul32(image_blocks.width,
|
||||
image_blocks.height);
|
||||
total_size = clamped_umul32(total_size, image_blocks.depth);
|
||||
total_size = clamped_umul32(total_size, desc->bytes_per_block);
|
||||
return total_size;
|
||||
}
|
||||
|
||||
if (pitch == 0)
|
||||
pitch = svga3dsurface_calculate_pitch(desc, size);
|
||||
|
||||
slice_size = clamped_umul32(image_blocks.height, pitch);
|
||||
total_size = clamped_umul32(slice_size, image_blocks.depth);
|
||||
|
||||
return total_size;
|
||||
}
|
||||
|
||||
/*
 * svga3dsurface_get_serialized_size - Total number of bytes needed to back
 * an entire surface: every mip level, and every cube face if @cubemap.
 *
 * NOTE(review): each per-image size is overflow-clamped internally, but the
 * accumulation across mip levels and the cubemap face multiply below are
 * plain u32 arithmetic — presumably callers bound the inputs; confirm.
 */
static inline u32
svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
				  surf_size_struct base_level_size,
				  u32 num_mip_levels,
				  bool cubemap)
{
	const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
	u32 total_size = 0;
	u32 mip;

	/* Sum the (tightly packed) image size of every mip level. */
	for (mip = 0; mip < num_mip_levels; mip++) {
		surf_size_struct size =
			svga3dsurface_get_mip_size(base_level_size, mip);
		total_size += svga3dsurface_get_image_buffer_size(desc,
								  &size, 0);
	}

	/* A cubemap stores one full mip chain per face. */
	if (cubemap)
		total_size *= SVGA3D_MAX_SURFACE_FACES;

	return total_size;
}
|
||||
|
||||
|
||||
/**
|
||||
* svga3dsurface_get_pixel_offset - Compute the offset (in bytes) to a pixel
|
||||
* in an image (or volume).
|
||||
*
|
||||
* @width: The image width in pixels.
|
||||
* @height: The image height in pixels
|
||||
*/
|
||||
static inline u32
|
||||
svga3dsurface_get_pixel_offset(SVGA3dSurfaceFormat format,
|
||||
u32 width, u32 height,
|
||||
u32 x, u32 y, u32 z)
|
||||
{
|
||||
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
|
||||
const u32 bw = desc->block_size.width, bh = desc->block_size.height;
|
||||
const u32 bd = desc->block_size.depth;
|
||||
const u32 rowstride = DIV_ROUND_UP(width, bw) * desc->bytes_per_block;
|
||||
const u32 imgstride = DIV_ROUND_UP(height, bh) * rowstride;
|
||||
const u32 offset = (z / bd * imgstride +
|
||||
y / bh * rowstride +
|
||||
x / bw * desc->bytes_per_block);
|
||||
return offset;
|
||||
}
|
||||
|
||||
|
||||
static inline u32
|
||||
svga3dsurface_get_image_offset(SVGA3dSurfaceFormat format,
|
||||
surf_size_struct baseLevelSize,
|
||||
u32 numMipLevels,
|
||||
u32 face,
|
||||
u32 mip)
|
||||
|
||||
{
|
||||
u32 offset;
|
||||
u32 mipChainBytes;
|
||||
u32 mipChainBytesToLevel;
|
||||
u32 i;
|
||||
const struct svga3d_surface_desc *desc;
|
||||
surf_size_struct mipSize;
|
||||
u32 bytes;
|
||||
|
||||
desc = svga3dsurface_get_desc(format);
|
||||
|
||||
mipChainBytes = 0;
|
||||
mipChainBytesToLevel = 0;
|
||||
for (i = 0; i < numMipLevels; i++) {
|
||||
mipSize = svga3dsurface_get_mip_size(baseLevelSize, i);
|
||||
bytes = svga3dsurface_get_image_buffer_size(desc, &mipSize, 0);
|
||||
mipChainBytes += bytes;
|
||||
if (i < mip)
|
||||
mipChainBytesToLevel += bytes;
|
||||
}
|
||||
|
||||
offset = mipChainBytes * face + mipChainBytesToLevel;
|
||||
|
||||
return offset;
|
||||
}
|
89
drivers/gpu/drm/vmwgfx/svga_escape.h
Normal file
89
drivers/gpu/drm/vmwgfx/svga_escape.h
Normal file
|
@ -0,0 +1,89 @@
|
|||
/**********************************************************
|
||||
* Copyright 2007-2009 VMware, Inc. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person
|
||||
* obtaining a copy of this software and associated documentation
|
||||
* files (the "Software"), to deal in the Software without
|
||||
* restriction, including without limitation the rights to use, copy,
|
||||
* modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
* of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be
|
||||
* included in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
**********************************************************/
|
||||
|
||||
/*
|
||||
* svga_escape.h --
|
||||
*
|
||||
* Definitions for our own (vendor-specific) SVGA Escape commands.
|
||||
*/
|
||||
|
||||
#ifndef _SVGA_ESCAPE_H_
|
||||
#define _SVGA_ESCAPE_H_
|
||||
|
||||
|
||||
/*
|
||||
* Namespace IDs for the escape command
|
||||
*/
|
||||
|
||||
#define SVGA_ESCAPE_NSID_VMWARE 0x00000000
|
||||
#define SVGA_ESCAPE_NSID_DEVEL 0xFFFFFFFF
|
||||
|
||||
|
||||
/*
|
||||
* Within SVGA_ESCAPE_NSID_VMWARE, we multiplex commands according to
|
||||
* the first DWORD of escape data (after the nsID and size). As a
|
||||
* guideline we're using the high word and low word as a major and
|
||||
* minor command number, respectively.
|
||||
*
|
||||
* Major command number allocation:
|
||||
*
|
||||
* 0000: Reserved
|
||||
* 0001: SVGA_ESCAPE_VMWARE_LOG (svga_binary_logger.h)
|
||||
* 0002: SVGA_ESCAPE_VMWARE_VIDEO (svga_overlay.h)
|
||||
* 0003: SVGA_ESCAPE_VMWARE_HINT (svga_escape.h)
|
||||
*/
|
||||
|
||||
#define SVGA_ESCAPE_VMWARE_MAJOR_MASK 0xFFFF0000
|
||||
|
||||
|
||||
/*
|
||||
* SVGA Hint commands.
|
||||
*
|
||||
* These escapes let the SVGA driver provide optional information to
|
||||
* he host about the state of the guest or guest applications. The
|
||||
* host can use these hints to make user interface or performance
|
||||
* decisions.
|
||||
*
|
||||
* Notes:
|
||||
*
|
||||
* - SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN is deprecated for guests
|
||||
* that use the SVGA Screen Object extension. Instead of sending
|
||||
* this escape, use the SVGA_SCREEN_FULLSCREEN_HINT flag on your
|
||||
* Screen Object.
|
||||
*/
|
||||
|
||||
#define SVGA_ESCAPE_VMWARE_HINT 0x00030000
|
||||
#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN 0x00030001 /* Deprecated */
|
||||
|
||||
/*
 * Payload for the (deprecated) SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN escape:
 * tells the host whether the guest is in fullscreen mode, and on which
 * monitor.
 */
typedef
struct {
	uint32 command;		/* SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN */
	uint32 fullscreen;	/* Nonzero when entering fullscreen */
	struct {
		int32 x, y;	/* Position of the monitor being hinted at */
	} monitorPosition;
} SVGAEscapeHintFullscreen;
|
||||
|
||||
#endif /* _SVGA_ESCAPE_H_ */
|
201
drivers/gpu/drm/vmwgfx/svga_overlay.h
Normal file
201
drivers/gpu/drm/vmwgfx/svga_overlay.h
Normal file
|
@ -0,0 +1,201 @@
|
|||
/**********************************************************
|
||||
* Copyright 2007-2009 VMware, Inc. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person
|
||||
* obtaining a copy of this software and associated documentation
|
||||
* files (the "Software"), to deal in the Software without
|
||||
* restriction, including without limitation the rights to use, copy,
|
||||
* modify, merge, publish, distribute, sublicense, and/or sell copies
|
||||
* of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be
|
||||
* included in all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*
|
||||
**********************************************************/
|
||||
|
||||
/*
|
||||
* svga_overlay.h --
|
||||
*
|
||||
* Definitions for video-overlay support.
|
||||
*/
|
||||
|
||||
#ifndef _SVGA_OVERLAY_H_
|
||||
#define _SVGA_OVERLAY_H_
|
||||
|
||||
#include "svga_reg.h"
|
||||
|
||||
/*
|
||||
* Video formats we support
|
||||
*/
|
||||
|
||||
#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
|
||||
#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
|
||||
#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
|
||||
|
||||
/*
 * Overlay pixel formats, expressed as their FOURCC codes so the enum
 * values can be compared directly against incoming FOURCCs.
 */
typedef enum {
	SVGA_OVERLAY_FORMAT_INVALID = 0,
	SVGA_OVERLAY_FORMAT_YV12 = VMWARE_FOURCC_YV12,
	SVGA_OVERLAY_FORMAT_YUY2 = VMWARE_FOURCC_YUY2,
	SVGA_OVERLAY_FORMAT_UYVY = VMWARE_FOURCC_UYVY,
} SVGAOverlayFormat;
|
||||
|
||||
#define SVGA_VIDEO_COLORKEY_MASK 0x00ffffff
|
||||
|
||||
#define SVGA_ESCAPE_VMWARE_VIDEO 0x00020000
|
||||
|
||||
#define SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS 0x00020001
|
||||
/* FIFO escape layout:
|
||||
* Type, Stream Id, (Register Id, Value) pairs */
|
||||
|
||||
#define SVGA_ESCAPE_VMWARE_VIDEO_FLUSH 0x00020002
|
||||
/* FIFO escape layout:
|
||||
* Type, Stream Id */
|
||||
|
||||
/*
 * FIFO escape payload for SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS: a header
 * identifying the stream followed by a variable number of register
 * (id, value) pairs. items[1] is the pre-C99 idiom for a trailing
 * variable-length array; senders size the escape accordingly.
 */
typedef
struct SVGAEscapeVideoSetRegs {
	struct {
		uint32 cmdType;		/* SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS */
		uint32 streamId;	/* Overlay stream to program */
	} header;

	/* May include zero or more items. */
	struct {
		uint32 registerId;
		uint32 value;
	} items[1];
} SVGAEscapeVideoSetRegs;

/*
 * FIFO escape payload for SVGA_ESCAPE_VMWARE_VIDEO_FLUSH: commits the
 * pending register state of one overlay stream.
 */
typedef
struct SVGAEscapeVideoFlush {
	uint32 cmdType;		/* SVGA_ESCAPE_VMWARE_VIDEO_FLUSH */
	uint32 streamId;	/* Overlay stream to flush */
} SVGAEscapeVideoFlush;
|
||||
|
||||
|
||||
/*
 * Struct definitions for the video overlay commands built on
 * SVGAFifoCmdEscape.
 */

/* Common prefix of every video escape: sub-command and stream id. */
typedef
struct {
	uint32 command;
	uint32 overlay;
} SVGAFifoEscapeCmdVideoBase;

/* Flush command: just the common prefix, no payload. */
typedef
struct {
	SVGAFifoEscapeCmdVideoBase videoCmd;
} SVGAFifoEscapeCmdVideoFlush;

/* Set-regs command carrying a variable number of (regId, value) pairs;
 * items[1] is the pre-C99 trailing-array idiom. */
typedef
struct {
	SVGAFifoEscapeCmdVideoBase videoCmd;
	struct {
		uint32 regId;
		uint32 value;
	} items[1];
} SVGAFifoEscapeCmdVideoSetRegs;

/* Set-regs command carrying every overlay register at once. */
typedef
struct {
	SVGAFifoEscapeCmdVideoBase videoCmd;
	struct {
		uint32 regId;
		uint32 value;
	} items[SVGA_VIDEO_NUM_REGS];
} SVGAFifoEscapeCmdVideoSetAllRegs;
|
||||
|
||||
|
||||
/*
|
||||
*----------------------------------------------------------------------
|
||||
*
|
||||
* VMwareVideoGetAttributes --
|
||||
*
|
||||
* Computes the size, pitches and offsets for YUV frames.
|
||||
*
|
||||
* Results:
|
||||
* TRUE on success; otherwise FALSE on failure.
|
||||
*
|
||||
* Side effects:
|
||||
* Pitches and offsets for the given YUV frame are put in 'pitches'
|
||||
* and 'offsets' respectively. They are both optional though.
|
||||
*
|
||||
*----------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
static inline bool
|
||||
VMwareVideoGetAttributes(const SVGAOverlayFormat format, /* IN */
|
||||
uint32 *width, /* IN / OUT */
|
||||
uint32 *height, /* IN / OUT */
|
||||
uint32 *size, /* OUT */
|
||||
uint32 *pitches, /* OUT (optional) */
|
||||
uint32 *offsets) /* OUT (optional) */
|
||||
{
|
||||
int tmp;
|
||||
|
||||
*width = (*width + 1) & ~1;
|
||||
|
||||
if (offsets) {
|
||||
offsets[0] = 0;
|
||||
}
|
||||
|
||||
switch (format) {
|
||||
case VMWARE_FOURCC_YV12:
|
||||
*height = (*height + 1) & ~1;
|
||||
*size = (*width + 3) & ~3;
|
||||
|
||||
if (pitches) {
|
||||
pitches[0] = *size;
|
||||
}
|
||||
|
||||
*size *= *height;
|
||||
|
||||
if (offsets) {
|
||||
offsets[1] = *size;
|
||||
}
|
||||
|
||||
tmp = ((*width >> 1) + 3) & ~3;
|
||||
|
||||
if (pitches) {
|
||||
pitches[1] = pitches[2] = tmp;
|
||||
}
|
||||
|
||||
tmp *= (*height >> 1);
|
||||
*size += tmp;
|
||||
|
||||
if (offsets) {
|
||||
offsets[2] = *size;
|
||||
}
|
||||
|
||||
*size += tmp;
|
||||
break;
|
||||
|
||||
case VMWARE_FOURCC_YUY2:
|
||||
case VMWARE_FOURCC_UYVY:
|
||||
*size = *width * 2;
|
||||
|
||||
if (pitches) {
|
||||
pitches[0] = *size;
|
||||
}
|
||||
|
||||
*size *= *height;
|
||||
break;
|
||||
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif /* _SVGA_OVERLAY_H_ */
|
1552
drivers/gpu/drm/vmwgfx/svga_reg.h
Normal file
1552
drivers/gpu/drm/vmwgfx/svga_reg.h
Normal file
File diff suppressed because it is too large
Load diff
45
drivers/gpu/drm/vmwgfx/svga_types.h
Normal file
45
drivers/gpu/drm/vmwgfx/svga_types.h
Normal file
|
@ -0,0 +1,45 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
/**
|
||||
* Silly typedefs for the svga headers. Currently the headers are shared
|
||||
* between all components that talk to svga. And as such the headers are
|
||||
* are in a completely different style and use weird defines.
|
||||
*
|
||||
* This file lets all the ugly be prefixed with svga*.
|
||||
*/
|
||||
|
||||
#ifndef _SVGA_TYPES_H_
|
||||
#define _SVGA_TYPES_H_
|
||||
|
||||
/* Map the VMware-style type names used by the shared SVGA headers onto
 * the kernel's fixed-width types. */
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint8_t uint8;
typedef int32_t int32;
typedef bool Bool;
|
||||
|
||||
#endif
|
352
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
Normal file
352
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
Normal file
|
@ -0,0 +1,352 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
#include <drm/ttm/ttm_page_alloc.h>
|
||||
|
||||
/*
 * TTM placement flag sets and placement descriptors used throughout the
 * driver. "ne" variants are pinned (TTM_PL_FLAG_NO_EVICT); all vmwgfx
 * placements use cached mappings.
 */

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED;

static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

/* VRAM only. */
struct ttm_placement vmw_vram_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static uint32_t gmr_vram_placement_flags[] = {
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

/* Prefer VRAM, fall back to a GMR; when busy, GMR only. */
struct ttm_placement vmw_vram_gmr_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static uint32_t vram_gmr_ne_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

/* Pinned variant of the above. */
struct ttm_placement vmw_vram_gmr_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

/* Prefer VRAM; when busy, allow system memory. */
struct ttm_placement vmw_vram_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

/* Pinned in VRAM. */
struct ttm_placement vmw_vram_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

/* System memory only. */
struct ttm_placement vmw_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

static uint32_t evictable_placement_flags[] = {
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

/* Any evictable domain; when busy, system memory. */
struct ttm_placement vmw_evictable_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 3,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

/* Surface backing: a GMR, falling back to GMR-then-VRAM when busy. */
struct ttm_placement vmw_srf_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

/* Per-buffer TTM backend state: the embedded ttm_tt plus the GMR slot
 * the buffer is currently bound to (valid only while bound). */
struct vmw_ttm_tt {
	struct ttm_tt ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
};
|
||||
|
||||
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
|
||||
{
|
||||
struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
|
||||
|
||||
vmw_be->gmr_id = bo_mem->start;
|
||||
|
||||
return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
|
||||
ttm->num_pages, vmw_be->gmr_id);
|
||||
}
|
||||
|
||||
static int vmw_ttm_unbind(struct ttm_tt *ttm)
|
||||
{
|
||||
struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
|
||||
|
||||
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * vmw_ttm_destroy - TTM backend destroy hook: tear down the embedded
 * ttm_tt and free the containing vmw_ttm_tt allocated in
 * vmw_ttm_tt_create().
 */
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);

	ttm_tt_fini(ttm);
	kfree(vmw_be);
}
|
||||
|
||||
/* TTM backend function table installed on every vmw_ttm_tt. */
static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};
|
||||
|
||||
/*
 * vmw_ttm_tt_create - Allocate and initialize the driver's ttm_tt wrapper.
 *
 * Returns the embedded struct ttm_tt on success, NULL on allocation or
 * init failure. The backend func table must be set before ttm_tt_init()
 * so TTM can call back into it.
 */
struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
				 unsigned long size, uint32_t page_flags,
				 struct page *dummy_read_page)
{
	struct vmw_ttm_tt *vmw_be;

	vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

	if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(vmw_be);
		return NULL;
	}

	return &vmw_be->ttm;
}
|
||||
|
||||
/*
 * vmw_invalidate_caches - Required TTM driver hook; the virtual device
 * has no caches to invalidate, so this is a no-op.
 */
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}
|
||||
|
||||
/*
 * vmw_init_mem_type - TTM hook describing each memory type the driver
 * supports: system pages, device "VRAM" (managed by the generic range
 * manager), and GMR slots (managed by the driver's own id manager).
 *
 * Returns 0 on success, -EINVAL for an unknown memory type.
 */
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		      struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
		/*
		 * "Guest Memory Regions" is an aperture like feature with
		 * one slot per bo. There is an upper limit of the number of
		 * slots as well as the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}
|
||||
|
||||
/*
 * vmw_evict_flags - TTM hook selecting where to move an evicted buffer:
 * always evict to system memory.
 */
void vmw_evict_flags(struct ttm_buffer_object *bo,
		     struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}
|
||||
|
||||
/*
 * vmw_verify_access - TTM mmap access check: verify that the opening
 * file's ttm object file holds a reference on this buffer object.
 */
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_dmabuf_verify_access(bo, tfile);
}
|
||||
|
||||
/*
 * vmw_ttm_io_mem_reserve - TTM hook filling in the bus-space address info
 * needed to CPU-map a memory region. Only VRAM is iomem; system pages and
 * GMRs need no bus setup.
 *
 * Returns 0 on success, -EINVAL if the type is unmappable or unknown.
 */
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	/* Defaults: not iomem, no address; size always covers the region. */
	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
		return 0;
	case TTM_PL_VRAM:
		/* Offset within the VRAM BAR, relative to its PCI base. */
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
|
||||
|
||||
/*
 * vmw_ttm_io_mem_free - Counterpart of vmw_ttm_io_mem_reserve(); nothing
 * was reserved, so nothing to free.
 */
static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * vmw_ttm_fault_reserve_notify - TTM fault hook; no per-fault work is
 * needed for this device.
 */
static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}
|
||||
|
||||
/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

/* Take a reference on a fence used as a TTM sync object. */
static void *vmw_sync_obj_ref(void *sync_obj)
{

	return (void *)
		vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

/* Drop a reference on a fence used as a TTM sync object. */
static void vmw_sync_obj_unref(void **sync_obj)
{
	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

/* Push the fence towards signaling; always reports success. */
static int vmw_sync_obj_flush(void *sync_obj)
{
	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
	return 0;
}

/* Check whether the fence's EXEC condition has signaled. */
static bool vmw_sync_obj_signaled(void *sync_obj)
{
	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
				      DRM_VMW_FENCE_FLAG_EXEC);

}

/* Wait for the fence's EXEC condition, bounded by the driver timeout. */
static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
				  DRM_VMW_FENCE_FLAG_EXEC,
				  lazy, interruptible,
				  VMW_FENCE_WAIT_TIMEOUT);
}
|
||||
|
||||
/*
 * The driver's TTM buffer-object driver table. Moves are left to TTM's
 * default memcpy path (.move = NULL); sync objects are vmw_fence_obj
 * pointers handled by the wrappers above.
 */
struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &ttm_pool_populate,
	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.sync_obj_signaled = vmw_sync_obj_signaled,
	.sync_obj_wait = vmw_sync_obj_wait,
	.sync_obj_flush = vmw_sync_obj_flush,
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref,
	.move_notify = NULL,
	.swap_notify = NULL,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};
|
274
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
Normal file
274
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
Normal file
|
@ -0,0 +1,274 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include "vmwgfx_resource_priv.h"
|
||||
#include "ttm/ttm_placement.h"
|
||||
|
||||
/* A userspace-visible context: a ttm base object wrapping the resource. */
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

/* Accounted size of one vmw_user_context (set at first ioctl). */
static uint64_t vmw_user_context_size;

/* Conversion table letting generic resource code map a ttm base object
 * of type VMW_RES_CONTEXT to its vmw_resource and back. */
static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;


/* Legacy contexts never have a backup buffer and are never evicted, so
 * all create/destroy/bind/unbind hooks are NULL. */
static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};
|
||||
|
||||
/**
|
||||
* Context management:
|
||||
*/
|
||||
|
||||
static void vmw_hw_context_destroy(struct vmw_resource *res)
|
||||
{
|
||||
|
||||
struct vmw_private *dev_priv = res->dev_priv;
|
||||
struct {
|
||||
SVGA3dCmdHeader header;
|
||||
SVGA3dCmdDestroyContext body;
|
||||
} *cmd;
|
||||
|
||||
|
||||
vmw_execbuf_release_pinned_bo(dev_priv);
|
||||
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
|
||||
if (unlikely(cmd == NULL)) {
|
||||
DRM_ERROR("Failed reserving FIFO space for surface "
|
||||
"destruction.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
|
||||
cmd->header.size = cpu_to_le32(sizeof(cmd->body));
|
||||
cmd->body.cid = cpu_to_le32(res->id);
|
||||
|
||||
vmw_fifo_commit(dev_priv, sizeof(*cmd));
|
||||
vmw_3d_resource_dec(dev_priv, false);
|
||||
}
|
||||
|
||||
/*
 * vmw_context_init - Initialize a context resource and define it on the
 * device.
 * @dev_priv: Device private.
 * @res:      Caller-allocated resource to initialize.
 * @res_free: Destructor for @res, or NULL to kfree it.
 *
 * On failure before the resource is live, @res is freed via @res_free
 * (or kfree). After vmw_resource_init() has succeeded, failures drop the
 * initial reference instead, which triggers the same destructor through
 * the resource framework. Returns 0 on success or a negative error code.
 */
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	/* The device only supports a limited range of context ids. */
	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	/* Keep the 3d subsystem alive while any context exists. */
	(void) vmw_3d_resource_inc(dev_priv, false);
	/* From here on, vmw_hw_context_destroy runs at final unref. */
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}
|
||||
|
||||
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
|
||||
int ret;
|
||||
|
||||
if (unlikely(res == NULL))
|
||||
return NULL;
|
||||
|
||||
ret = vmw_context_init(dev_priv, res, NULL);
|
||||
|
||||
return (ret == 0) ? res : NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* User-space context management:
|
||||
*/
|
||||
|
||||
static struct vmw_resource *
|
||||
vmw_user_context_base_to_res(struct ttm_base_object *base)
|
||||
{
|
||||
return &(container_of(base, struct vmw_user_context, base)->res);
|
||||
}
|
||||
|
||||
/**
 * vmw_user_context_free - Resource destructor for user-space contexts.
 *
 * @res: The resource embedded in a struct vmw_user_context.
 *
 * Frees the containing vmw_user_context and returns its accounted size
 * to the TTM global memory accounting.
 */
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);
	/* Save dev_priv before the kfree below invalidates @res. */
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}
|
||||
|
||||
/**
 * vmw_user_context_base_release - Release callback for the TTM base object.
 *
 * @p_base: Double pointer to the base object; cleared on return.
 *
 * Called when user-space holds no more references on the base object.
 * Drops the base object's reference on the resource, which may trigger
 * vmw_user_context_free.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
|
||||
|
||||
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
|
||||
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
|
||||
|
||||
return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
|
||||
}
|
||||
|
||||
/**
 * vmw_context_define_ioctl - Ioctl to create a user-space context.
 *
 * @dev:       DRM device.
 * @data:      Pointer to a struct drm_vmw_context_arg; on success its
 *             cid member receives the new context handle.
 * @file_priv: DRM file private.
 *
 * Accounts the allocation against the TTM global memory limit, allocates
 * and initializes a vmw_user_context, and registers it as a TTM base
 * object so user-space can reference it by handle.
 *
 * Returns 0 on success or a negative error code.
 */
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */
	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		/* Undo the global-memory accounting done above. */
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor (vmw_user_context_free) takes over
	 * resource freeing, including the memory accounting.
	 */
	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/* Extra reference held by the base object until base_release. */
	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	/* Drop the creation reference; on success ret is still 0 here. */
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;

}
|
320
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
Normal file
320
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
Normal file
|
@ -0,0 +1,320 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include "vmwgfx_drv.h"
|
||||
|
||||
|
||||
/**
 * vmw_dmabuf_to_placement - Validate a buffer to placement.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @placement: The TTM placement to validate the buffer into.
 * @interruptible: Use interruptible wait.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
			    struct vmw_dma_buffer *buf,
			    struct ttm_placement *placement,
			    bool interruptible)
{
	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	int ret;

	ret = ttm_write_lock(&vmaster->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure a pinned query bo can't block the move. */
	vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	ret = ttm_bo_validate(bo, placement, interruptible, false);

	ttm_bo_unreserve(bo);

err:
	ttm_write_unlock(&vmaster->lock);
	return ret;
}
|
||||
|
||||
/**
 * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @pin: Pin buffer if true.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
			      struct vmw_dma_buffer *buf,
			      bool pin, bool interruptible)
{
	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement *placement;
	int ret;

	ret = ttm_write_lock(&vmaster->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	if (pin)
		placement = &vmw_vram_gmr_ne_placement;
	else
		placement = &vmw_vram_gmr_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false);
	if (likely(ret == 0) || ret == -ERESTARTSYS)
		goto err_unreserve;


	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */
	if (pin)
		placement = &vmw_vram_ne_placement;
	else
		placement = &vmw_vram_placement;

	ret = ttm_bo_validate(bo, placement, interruptible, false);

err_unreserve:
	ttm_bo_unreserve(bo);
err:
	ttm_write_unlock(&vmaster->lock);
	return ret;
}
|
||||
|
||||
/**
|
||||
* vmw_dmabuf_to_vram - Move a buffer to vram.
|
||||
*
|
||||
* May only be called by the current master since it assumes that the
|
||||
* master lock is the current master's lock.
|
||||
* This function takes the master's lock in write mode.
|
||||
*
|
||||
* @dev_priv: Driver private.
|
||||
* @buf: DMA buffer to move.
|
||||
* @pin: Pin buffer in vram if true.
|
||||
* @interruptible: Use interruptible wait.
|
||||
*
|
||||
* Returns
|
||||
* -ERESTARTSYS if interrupted by a signal.
|
||||
*/
|
||||
int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buf,
|
||||
bool pin, bool interruptible)
|
||||
{
|
||||
struct ttm_placement *placement;
|
||||
|
||||
if (pin)
|
||||
placement = &vmw_vram_ne_placement;
|
||||
else
|
||||
placement = &vmw_vram_placement;
|
||||
|
||||
return vmw_dmabuf_to_placement(dev_priv, buf,
|
||||
placement,
|
||||
interruptible);
|
||||
}
|
||||
|
||||
/**
 * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 * Flushes and unpins the query bo if @pin == true to avoid failures.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to move.
 * @pin: Pin buffer in vram if true.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				bool pin, bool interruptible)
{
	struct vmw_master *vmaster = dev_priv->active_master;
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement placement;
	int ret = 0;

	if (pin)
		placement = vmw_vram_ne_placement;
	else
		placement = vmw_vram_placement;
	/* Restrict the placement to the first num_pages pages of VRAM. */
	placement.lpfn = bo->num_pages;

	ret = ttm_write_lock(&vmaster->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	if (pin)
		vmw_execbuf_release_pinned_bo(dev_priv);
	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err_unlock;

	/*
	 * Is this buffer already in vram but not at the start of it?
	 * If so, kick it out to system memory first so the validate
	 * below can place it at offset 0.
	 */
	if (bo->mem.mem_type == TTM_PL_VRAM &&
	    bo->mem.start < bo->num_pages &&
	    bo->mem.start > 0)
		(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);

	ret = ttm_bo_validate(bo, &placement, interruptible, false);

	/* For some reason we didn't end up at the start of vram. */
	WARN_ON(ret == 0 && bo->offset != 0);

	ttm_bo_unreserve(bo);
err_unlock:
	ttm_write_unlock(&vmaster->lock);

	return ret;
}
|
||||
|
||||
|
||||
/**
 * vmw_dmabuf_unpin - Unpin the given buffer; does not move the buffer.
 *
 * May only be called by the current master since it assumes that the
 * master lock is the current master's lock.
 * This function takes the master's lock in write mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
		     struct vmw_dma_buffer *buf,
		     bool interruptible)
{
	/*
	 * We could in theory early out if the buffer is
	 * unpinned but we need to lock and reserve the buffer
	 * anyways so we don't gain much by that.
	 */
	return vmw_dmabuf_to_placement(dev_priv, buf,
				       &vmw_evictable_placement,
				       interruptible);
}
|
||||
|
||||
|
||||
/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 *
 * A buffer in VRAM is addressed through the special framebuffer GMR id
 * plus its byte offset; a buffer in a real GMR is addressed by the GMR
 * id with offset 0.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
			  SVGAGuestPtr *ptr)
{
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
		ptr->offset = bo->offset;
	} else {
		/* mem.start holds the GMR id for GMR-backed buffers. */
		ptr->gmrId = bo->mem.start;
		ptr->offset = 0;
	}
}
|
||||
|
||||
|
||||
/**
 * vmw_bo_pin - Pin or unpin a buffer object without moving it.
 *
 * @bo: The buffer object. Must be reserved, and present either in VRAM
 * or GMR memory.
 * @pin: Whether to pin or unpin.
 *
 * Re-validates the buffer into a placement that accepts both VRAM and
 * GMR, toggling only the NO_EVICT flag, so the buffer must stay in its
 * current memory type (asserted below).
 */
void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
{
	uint32_t pl_flags;
	struct ttm_placement placement;
	uint32_t old_mem_type = bo->mem.mem_type;
	int ret;

	BUG_ON(!ttm_bo_is_reserved(bo));
	BUG_ON(old_mem_type != TTM_PL_VRAM &&
	       old_mem_type != VMW_PL_GMR);

	/* Accept either memory type so the buffer does not move. */
	pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
	if (pin)
		pl_flags |= TTM_PL_FLAG_NO_EVICT;

	memset(&placement, 0, sizeof(placement));
	placement.num_placement = 1;
	placement.placement = &pl_flags;

	ret = ttm_bo_validate(bo, &placement, false, true);

	/* Validation must succeed in place; anything else is a driver bug. */
	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
|
1215
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
Normal file
1215
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
Normal file
File diff suppressed because it is too large
Load diff
768
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
Normal file
768
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
Normal file
|
@ -0,0 +1,768 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#ifndef _VMWGFX_DRV_H_
|
||||
#define _VMWGFX_DRV_H_
|
||||
|
||||
#include "vmwgfx_reg.h"
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/vmwgfx_drm.h>
|
||||
#include <drm/drm_hashtab.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_object.h>
|
||||
#include <drm/ttm/ttm_lock.h>
|
||||
#include <drm/ttm/ttm_execbuf_util.h>
|
||||
#include <drm/ttm/ttm_module.h>
|
||||
#include "vmwgfx_fence.h"
|
||||
|
||||
#define VMWGFX_DRIVER_DATE "20120209"
|
||||
#define VMWGFX_DRIVER_MAJOR 2
|
||||
#define VMWGFX_DRIVER_MINOR 4
|
||||
#define VMWGFX_DRIVER_PATCHLEVEL 0
|
||||
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
|
||||
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
|
||||
#define VMWGFX_MAX_RELOCATIONS 2048
|
||||
#define VMWGFX_MAX_VALIDATIONS 2048
|
||||
#define VMWGFX_MAX_DISPLAYS 16
|
||||
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
|
||||
|
||||
#define VMW_PL_GMR TTM_PL_PRIV0
|
||||
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
|
||||
|
||||
#define VMW_RES_CONTEXT ttm_driver_type0
|
||||
#define VMW_RES_SURFACE ttm_driver_type1
|
||||
#define VMW_RES_STREAM ttm_driver_type2
|
||||
#define VMW_RES_FENCE ttm_driver_type3
|
||||
|
||||
/* Per-open-file driver private data. */
struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;	/* TTM object namespace for this file */
	struct list_head fence_events;
};
|
||||
|
||||
/* A TTM buffer object plus a list of resources backed by it. */
struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;	/* resources using this buffer as backup */
};
|
||||
|
||||
/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
};
|
||||
|
||||
struct vmw_res_func;
/* Base type for all device resources (contexts, surfaces, streams). */
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;			/* device id, allocated from an idr */
	bool avail;		/* visible to lookups once activated */
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	const struct vmw_res_func *func;	/* per-type operations */
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	void (*res_free) (struct vmw_resource *res);	/* final destructor */
	void (*hw_destroy) (struct vmw_resource *res);	/* device-side teardown */
};
|
||||
|
||||
/* Resource categories; also used to index per-type idrs and LRU lists. */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_max	/* number of resource types */
};
|
||||
|
||||
/* State for snooping cursor image updates off a surface. */
struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;	/* snooped cursor image data */
};
|
||||
|
||||
struct vmw_framebuffer;
|
||||
struct vmw_surface_offset;
|
||||
|
||||
/* A 3D surface resource and its per-face/mip layout description. */
struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;	/* one entry per mip level, all faces */
	uint32_t num_sizes;
	bool scanout;		/* surface may be used for scanout */
	/* TODO so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
};
|
||||
|
||||
/* Queue of FIFO markers used to estimate command-stream lag. */
struct vmw_marker_queue {
	struct list_head head;
	struct timespec lag;		/* current estimated lag */
	struct timespec lag_time;	/* time of last lag update */
	spinlock_t lock;		/* protects all members */
};
|
||||
|
||||
/* State of the device command FIFO, including the bounce buffer used
 * when a reservation wraps the ring. */
struct vmw_fifo_state {
	unsigned long reserved_size;	/* size of the current reservation */
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;	/* reservation didn't fit contiguously */
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
};
|
||||
|
||||
/* A guest-pointer patch location in a command buffer, referring to the
 * validate-list entry at @index. */
struct vmw_relocation {
	SVGAGuestPtr *location;
	uint32_t index;
};
|
||||
|
||||
/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @node: Validation-list node for the resource.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};
|
||||
|
||||
/* Per-submission software state used by the execbuf command parser.
 * Protected by the cmdbuf mutex (see struct vmw_private). */
struct vmw_sw_context{
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct ttm_object_file *tfile;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;		/* number of relocations in use */
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;		/* number of validate buffers in use */
	uint32_t *cmd_bounce;		/* bounce buffer for user command data */
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	uint32_t fence_flags;
	struct ttm_buffer_object *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
};
|
||||
|
||||
struct vmw_legacy_display;
|
||||
struct vmw_overlay;
|
||||
|
||||
/* Per-DRM-master state: the master TTM lock and its fb surfaces. */
struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;	/* protects @fb_surf */
	struct list_head fb_surf;
};
|
||||
|
||||
/* Saved geometry of one VGA display unit (for save/restore). */
struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};
|
||||
|
||||
/* Main per-device private structure for the vmwgfx driver. */
struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;		/* base of the index/value I/O ports */
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_descriptors;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t memory_size;
	bool has_gmr;
	struct mutex hw_mutex;		/* serializes register access */

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];	/* one idr per resource type */
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	int fence_queue_waiters; /* Protected by hw_mutex */
	int goal_queue_waiters; /* Protected by hw_mutex */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool is_opened;
	bool enable_fb;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;

	struct mutex release_mutex;		/* protects num_3d_resources */
	uint32_t num_3d_resources;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct ttm_buffer_object *dummy_query_bo;
	struct ttm_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;
};
|
||||
|
||||
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
|
||||
{
|
||||
return container_of(res, struct vmw_surface, res);
|
||||
}
|
||||
|
||||
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
|
||||
{
|
||||
return (struct vmw_private *)dev->dev_private;
|
||||
}
|
||||
|
||||
static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
|
||||
{
|
||||
return (struct vmw_fpriv *)file_priv->driver_priv;
|
||||
}
|
||||
|
||||
static inline struct vmw_master *vmw_master(struct drm_master *master)
|
||||
{
|
||||
return (struct vmw_master *) master->driver_priv;
|
||||
}
|
||||
|
||||
/*
 * Write a device register via the SVGA index/value I/O port pair:
 * the register number goes to the index port, the value to the value
 * port. Callers serialize via hw_mutex; the two outl()s must not be
 * interleaved with another register access.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}
|
||||
|
||||
/*
 * Read a device register via the SVGA index/value I/O port pair.
 * Same serialization requirement as vmw_write().
 */
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	uint32_t val;

	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	return val;
}
|
||||
|
||||
int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
|
||||
void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);
|
||||
|
||||
/**
|
||||
* GMR utilities - vmwgfx_gmr.c
|
||||
*/
|
||||
|
||||
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
|
||||
struct page *pages[],
|
||||
unsigned long num_pages,
|
||||
int gmr_id);
|
||||
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
|
||||
|
||||
/**
|
||||
* Resource utilities - vmwgfx_resource.c
|
||||
*/
|
||||
struct vmw_user_resource_conv;
|
||||
extern const struct vmw_user_resource_conv *user_surface_converter;
|
||||
extern const struct vmw_user_resource_conv *user_context_converter;
|
||||
|
||||
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
|
||||
extern void vmw_resource_unreference(struct vmw_resource **p_res);
|
||||
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
|
||||
extern int vmw_resource_validate(struct vmw_resource *res);
|
||||
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
|
||||
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
|
||||
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_context_check(struct vmw_private *dev_priv,
|
||||
struct ttm_object_file *tfile,
|
||||
int id,
|
||||
struct vmw_resource **p_res);
|
||||
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
|
||||
struct ttm_object_file *tfile,
|
||||
uint32_t handle,
|
||||
struct vmw_surface **out_surf,
|
||||
struct vmw_dma_buffer **out_buf);
|
||||
extern int vmw_user_resource_lookup_handle(
|
||||
struct vmw_private *dev_priv,
|
||||
struct ttm_object_file *tfile,
|
||||
uint32_t handle,
|
||||
const struct vmw_user_resource_conv *converter,
|
||||
struct vmw_resource **p_res);
|
||||
extern void vmw_surface_res_free(struct vmw_resource *res);
|
||||
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_surface_check(struct vmw_private *dev_priv,
|
||||
struct ttm_object_file *tfile,
|
||||
uint32_t handle, int *id);
|
||||
extern int vmw_surface_validate(struct vmw_private *dev_priv,
|
||||
struct vmw_surface *srf);
|
||||
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
|
||||
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *vmw_bo,
|
||||
size_t size, struct ttm_placement *placement,
|
||||
bool interuptable,
|
||||
void (*bo_free) (struct ttm_buffer_object *bo));
|
||||
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
|
||||
struct ttm_object_file *tfile);
|
||||
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
|
||||
uint32_t cur_validate_node);
|
||||
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
|
||||
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
|
||||
uint32_t id, struct vmw_dma_buffer **out);
|
||||
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
|
||||
struct ttm_object_file *tfile,
|
||||
uint32_t *inout_id,
|
||||
struct vmw_resource **out);
|
||||
extern void vmw_resource_unreserve(struct vmw_resource *res,
|
||||
struct vmw_dma_buffer *new_backup,
|
||||
unsigned long new_backup_offset);
|
||||
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
|
||||
struct ttm_mem_reg *mem);
|
||||
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
|
||||
struct vmw_fence_obj *fence);
|
||||
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
|
||||
|
||||
/**
|
||||
* DMA buffer helper routines - vmwgfx_dmabuf.c
|
||||
*/
|
||||
extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
|
||||
struct vmw_dma_buffer *bo,
|
||||
struct ttm_placement *placement,
|
||||
bool interruptible);
|
||||
extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buf,
|
||||
bool pin, bool interruptible);
|
||||
extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buf,
|
||||
bool pin, bool interruptible);
|
||||
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
|
||||
struct vmw_dma_buffer *bo,
|
||||
bool pin, bool interruptible);
|
||||
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
|
||||
struct vmw_dma_buffer *bo,
|
||||
bool interruptible);
|
||||
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
|
||||
SVGAGuestPtr *ptr);
|
||||
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
|
||||
|
||||
/**
|
||||
* Misc Ioctl functionality - vmwgfx_ioctl.c
|
||||
*/
|
||||
|
||||
extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern unsigned int vmw_fops_poll(struct file *filp,
|
||||
struct poll_table_struct *wait);
|
||||
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
|
||||
size_t count, loff_t *offset);
|
||||
|
||||
/**
|
||||
* Fifo utilities - vmwgfx_fifo.c
|
||||
*/
|
||||
|
||||
extern int vmw_fifo_init(struct vmw_private *dev_priv,
|
||||
struct vmw_fifo_state *fifo);
|
||||
extern void vmw_fifo_release(struct vmw_private *dev_priv,
|
||||
struct vmw_fifo_state *fifo);
|
||||
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
|
||||
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
|
||||
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
|
||||
uint32_t *seqno);
|
||||
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
|
||||
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
|
||||
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
|
||||
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
|
||||
uint32_t cid);
|
||||
|
||||
/**
|
||||
* TTM glue - vmwgfx_ttm_glue.c
|
||||
*/
|
||||
|
||||
extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
|
||||
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
|
||||
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
|
||||
|
||||
/**
|
||||
* TTM buffer object driver - vmwgfx_buffer.c
|
||||
*/
|
||||
|
||||
extern struct ttm_placement vmw_vram_placement;
|
||||
extern struct ttm_placement vmw_vram_ne_placement;
|
||||
extern struct ttm_placement vmw_vram_sys_placement;
|
||||
extern struct ttm_placement vmw_vram_gmr_placement;
|
||||
extern struct ttm_placement vmw_vram_gmr_ne_placement;
|
||||
extern struct ttm_placement vmw_sys_placement;
|
||||
extern struct ttm_placement vmw_evictable_placement;
|
||||
extern struct ttm_placement vmw_srf_placement;
|
||||
extern struct ttm_bo_driver vmw_bo_driver;
|
||||
extern int vmw_dma_quiescent(struct drm_device *dev);
|
||||
|
||||
/**
|
||||
* Command submission - vmwgfx_execbuf.c
|
||||
*/
|
||||
|
||||
extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_execbuf_process(struct drm_file *file_priv,
|
||||
struct vmw_private *dev_priv,
|
||||
void __user *user_commands,
|
||||
void *kernel_commands,
|
||||
uint32_t command_size,
|
||||
uint64_t throttle_us,
|
||||
struct drm_vmw_fence_rep __user
|
||||
*user_fence_rep,
|
||||
struct vmw_fence_obj **out_fence);
|
||||
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
|
||||
struct vmw_fence_obj *fence);
|
||||
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
|
||||
|
||||
extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
|
||||
struct vmw_private *dev_priv,
|
||||
struct vmw_fence_obj **p_fence,
|
||||
uint32_t *p_handle);
|
||||
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
|
||||
struct vmw_fpriv *vmw_fp,
|
||||
int ret,
|
||||
struct drm_vmw_fence_rep __user
|
||||
*user_fence_rep,
|
||||
struct vmw_fence_obj *fence,
|
||||
uint32_t fence_handle);
|
||||
|
||||
/**
|
||||
 * IRQs and waiting - vmwgfx_irq.c
|
||||
*/
|
||||
|
||||
extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
|
||||
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
|
||||
uint32_t seqno, bool interruptible,
|
||||
unsigned long timeout);
|
||||
extern void vmw_irq_preinstall(struct drm_device *dev);
|
||||
extern int vmw_irq_postinstall(struct drm_device *dev);
|
||||
extern void vmw_irq_uninstall(struct drm_device *dev);
|
||||
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
|
||||
uint32_t seqno);
|
||||
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
|
||||
bool lazy,
|
||||
bool fifo_idle,
|
||||
uint32_t seqno,
|
||||
bool interruptible,
|
||||
unsigned long timeout);
|
||||
extern void vmw_update_seqno(struct vmw_private *dev_priv,
|
||||
struct vmw_fifo_state *fifo_state);
|
||||
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
|
||||
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
|
||||
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
|
||||
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
|
||||
|
||||
/**
|
||||
* Rudimentary fence-like objects currently used only for throttling -
|
||||
* vmwgfx_marker.c
|
||||
*/
|
||||
|
||||
extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
|
||||
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
|
||||
extern int vmw_marker_push(struct vmw_marker_queue *queue,
|
||||
uint32_t seqno);
|
||||
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
|
||||
uint32_t signaled_seqno);
|
||||
extern int vmw_wait_lag(struct vmw_private *dev_priv,
|
||||
struct vmw_marker_queue *queue, uint32_t us);
|
||||
|
||||
/**
|
||||
* Kernel framebuffer - vmwgfx_fb.c
|
||||
*/
|
||||
|
||||
int vmw_fb_init(struct vmw_private *vmw_priv);
|
||||
int vmw_fb_close(struct vmw_private *dev_priv);
|
||||
int vmw_fb_off(struct vmw_private *vmw_priv);
|
||||
int vmw_fb_on(struct vmw_private *vmw_priv);
|
||||
|
||||
/**
|
||||
* Kernel modesetting - vmwgfx_kms.c
|
||||
*/
|
||||
|
||||
int vmw_kms_init(struct vmw_private *dev_priv);
|
||||
int vmw_kms_close(struct vmw_private *dev_priv);
|
||||
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
|
||||
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
|
||||
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
|
||||
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
|
||||
struct ttm_object_file *tfile,
|
||||
struct ttm_buffer_object *bo,
|
||||
SVGA3dCmdHeader *header);
|
||||
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
|
||||
unsigned width, unsigned height, unsigned pitch,
|
||||
unsigned bpp, unsigned depth);
|
||||
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
|
||||
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
|
||||
uint32_t pitch,
|
||||
uint32_t height);
|
||||
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
|
||||
int vmw_enable_vblank(struct drm_device *dev, int crtc);
|
||||
void vmw_disable_vblank(struct drm_device *dev, int crtc);
|
||||
int vmw_kms_present(struct vmw_private *dev_priv,
|
||||
struct drm_file *file_priv,
|
||||
struct vmw_framebuffer *vfb,
|
||||
struct vmw_surface *surface,
|
||||
uint32_t sid, int32_t destX, int32_t destY,
|
||||
struct drm_vmw_rect *clips,
|
||||
uint32_t num_clips);
|
||||
int vmw_kms_readback(struct vmw_private *dev_priv,
|
||||
struct drm_file *file_priv,
|
||||
struct vmw_framebuffer *vfb,
|
||||
struct drm_vmw_fence_rep __user *user_fence_rep,
|
||||
struct drm_vmw_rect *clips,
|
||||
uint32_t num_clips);
|
||||
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
|
||||
int vmw_dumb_create(struct drm_file *file_priv,
|
||||
struct drm_device *dev,
|
||||
struct drm_mode_create_dumb *args);
|
||||
|
||||
int vmw_dumb_map_offset(struct drm_file *file_priv,
|
||||
struct drm_device *dev, uint32_t handle,
|
||||
uint64_t *offset);
|
||||
int vmw_dumb_destroy(struct drm_file *file_priv,
|
||||
struct drm_device *dev,
|
||||
uint32_t handle);
|
||||
/**
|
||||
* Overlay control - vmwgfx_overlay.c
|
||||
*/
|
||||
|
||||
int vmw_overlay_init(struct vmw_private *dev_priv);
|
||||
int vmw_overlay_close(struct vmw_private *dev_priv);
|
||||
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
|
||||
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
|
||||
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
|
||||
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
|
||||
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
|
||||
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
|
||||
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
|
||||
|
||||
/**
|
||||
* GMR Id manager
|
||||
*/
|
||||
|
||||
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
|
||||
|
||||
/**
|
||||
* Inline helper functions
|
||||
*/
|
||||
|
||||
/*
 * Drop one reference to a surface and clear the caller's pointer.
 * The surface is reference-counted through its embedded vmw_resource;
 * the actual release is delegated to vmw_resource_unreference().
 * NOTE: *srf must be non-NULL — there is no NULL check here.
 */
static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}
|
||||
|
||||
/*
 * Take an extra reference on a surface via its embedded resource and
 * return the surface itself for call-chaining convenience.
 */
static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}
|
||||
|
||||
/*
 * Drop one reference to a DMA buffer and clear the caller's pointer.
 * Unlike vmw_surface_unreference(), a NULL *buf is tolerated.
 * The underlying TTM buffer object is released via ttm_bo_unref().
 */
static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}
|
||||
|
||||
/*
 * Take an extra reference on a DMA buffer's underlying TTM buffer
 * object. Returns @buf when ttm_bo_reference() succeeds, NULL
 * otherwise.
 */
static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}
|
||||
|
||||
/*
 * Return the TTM global memory-accounting object this device is
 * registered with (stored as an opaque pointer in mem_global_ref).
 */
static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
|
||||
#endif
|
1795
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
Normal file
1795
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
Normal file
File diff suppressed because it is too large
Load diff
657
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
Normal file
657
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
Normal file
|
@ -0,0 +1,657 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2007 David Airlie
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include <linux/export.h>
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include "vmwgfx_drv.h"
|
||||
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
|
||||
#define VMW_DIRTY_DELAY (HZ / 30)
|
||||
|
||||
struct vmw_fb_par {
|
||||
struct vmw_private *vmw_priv;
|
||||
|
||||
void *vmalloc;
|
||||
|
||||
struct vmw_dma_buffer *vmw_bo;
|
||||
struct ttm_bo_kmap_obj map;
|
||||
|
||||
u32 pseudo_palette[17];
|
||||
|
||||
unsigned depth;
|
||||
unsigned bpp;
|
||||
|
||||
unsigned max_width;
|
||||
unsigned max_height;
|
||||
|
||||
void *bo_ptr;
|
||||
unsigned bo_size;
|
||||
bool bo_iowrite;
|
||||
|
||||
struct {
|
||||
spinlock_t lock;
|
||||
bool active;
|
||||
unsigned x1;
|
||||
unsigned y1;
|
||||
unsigned x2;
|
||||
unsigned y2;
|
||||
} dirty;
|
||||
};
|
||||
|
||||
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
|
||||
unsigned blue, unsigned transp,
|
||||
struct fb_info *info)
|
||||
{
|
||||
struct vmw_fb_par *par = info->par;
|
||||
u32 *pal = par->pseudo_palette;
|
||||
|
||||
if (regno > 15) {
|
||||
DRM_ERROR("Bad regno %u.\n", regno);
|
||||
return 1;
|
||||
}
|
||||
|
||||
switch (par->depth) {
|
||||
case 24:
|
||||
case 32:
|
||||
pal[regno] = ((red & 0xff00) << 8) |
|
||||
(green & 0xff00) |
|
||||
((blue & 0xff00) >> 8);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * fb_check_var hook: validate and normalize a requested video mode.
 *
 * Only 32 bpp is accepted; the effective depth is 32 when an alpha
 * channel is requested (transp.length > 0) and 24 otherwise. Fills in
 * the ARGB/XRGB8888 channel layout, rejects panning offsets when the
 * device lacks SVGA_CAP_DISPLAY_TOPOLOGY, and rejects geometries that
 * exceed the allocated framebuffer or the available VRAM.
 *
 * Returns 0 on success, -EINVAL for an unsupported mode.
 */
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
			    struct fb_info *info)
{
	int depth = var->bits_per_pixel;
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;

	switch (var->bits_per_pixel) {
	case 32:
		depth = (var->transp.length > 0) ? 32 : 24;
		break;
	default:
		DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
		return -EINVAL;
	}

	switch (depth) {
	case 24:
		/* XRGB8888: alpha byte present but unused. */
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 0;
		var->transp.offset = 0;
		break;
	case 32:
		/* ARGB8888. */
		var->red.offset = 16;
		var->green.offset = 8;
		var->blue.offset = 0;
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		var->transp.length = 8;
		var->transp.offset = 24;
		break;
	default:
		DRM_ERROR("Bad depth %u.\n", depth);
		return -EINVAL;
	}

	/* Panning moves the scanout origin, which needs display topology. */
	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    (var->xoffset != 0 || var->yoffset != 0)) {
		DRM_ERROR("Can not handle panning without display topology\n");
		return -EINVAL;
	}

	if ((var->xoffset + var->xres) > par->max_width ||
	    (var->yoffset + var->yres) > par->max_height) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	if (!vmw_kms_validate_mode_vram(vmw_priv,
					var->xres * var->bits_per_pixel/8,
					var->yoffset + var->yres)) {
		DRM_ERROR("Requested geom can not fit in framebuffer\n");
		return -EINVAL;
	}

	return 0;
}
|
||||
|
||||
/*
 * fb_set_par hook: program the SVGA device for the current info->var.
 *
 * Recomputes the line length, pushes the new mode via
 * vmw_kms_write_svga(), and — when the device supports display
 * topology — (re)defines guest display 0 to cover the mode. The
 * framebuffer offset is expected to remain 0; if not, the user most
 * likely sees nothing, hence the WARN_ON.
 */
static int vmw_fb_set_par(struct fb_info *info)
{
	struct vmw_fb_par *par = info->par;
	struct vmw_private *vmw_priv = par->vmw_priv;
	int ret;

	info->fix.line_length = info->var.xres * info->var.bits_per_pixel/8;

	ret = vmw_kms_write_svga(vmw_priv, info->var.xres, info->var.yres,
				 info->fix.line_length,
				 par->bpp, par->depth);
	if (ret)
		return ret;

	if (vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) {
		/* TODO check if pitch and offset changes */
		vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
		/* Writing SVGA_ID_INVALID ends the display definition. */
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	/* This is really helpful since if this fails the user
	 * can probably not see anything on the screen.
	 */
	WARN_ON(vmw_read(vmw_priv, SVGA_REG_FB_OFFSET) != 0);

	return 0;
}
|
||||
|
||||
/*
 * fb_pan_display hook: intentional no-op stub; panning validity is
 * already enforced in vmw_fb_check_var(). Always reports success.
 */
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	return 0;
}
|
||||
|
||||
/*
 * fb_blank hook: blanking is not implemented for the virtual device;
 * report success as a no-op.
 */
static int vmw_fb_blank(int blank, struct fb_info *info)
{
	return 0;
}
|
||||
|
||||
/*
|
||||
* Dirty code
|
||||
*/
|
||||
|
||||
/*
 * Copy the accumulated dirty region from the vmalloc shadow buffer
 * into the VRAM-backed buffer object and emit an SVGA_CMD_UPDATE so
 * the device refreshes the screen.
 *
 * The dirty rectangle is snapshotted and reset under dirty.lock; the
 * copy and the FIFO command run unlocked. Note the row loop runs from
 * the first dirty row to the end of the framebuffer
 * (i < smem_len / 4), copying w-wide slices of every row below y —
 * more than the h dirty rows, which is conservative but correct.
 * Does nothing while suspended or while dirty tracking is inactive.
 */
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
	struct vmw_private *vmw_priv = par->vmw_priv;
	struct fb_info *info = vmw_priv->fb_info;
	int stride = (info->fix.line_length / 4);	/* in 32-bit pixels */
	int *src = (int *)info->screen_base;
	__le32 __iomem *vram_mem = par->bo_ptr;
	unsigned long flags;
	unsigned x, y, w, h;
	int i, k;
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	if (vmw_priv->suspended)
		return;

	spin_lock_irqsave(&par->dirty.lock, flags);
	if (!par->dirty.active) {
		spin_unlock_irqrestore(&par->dirty.lock, flags);
		return;
	}
	/* Snapshot the dirty rect (clipped to the mode) and clear it. */
	x = par->dirty.x1;
	y = par->dirty.y1;
	w = min(par->dirty.x2, info->var.xres) - x;
	h = min(par->dirty.y2, info->var.yres) - y;
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
		for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
			iowrite32(src[k], vram_mem + k);
	}

#if 0
	DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif

	cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return;
	}

	/* Tell the device which screen rectangle changed. */
	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(w);
	cmd->body.height = cpu_to_le32(h);
	vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}
|
||||
|
||||
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
|
||||
unsigned x1, unsigned y1,
|
||||
unsigned width, unsigned height)
|
||||
{
|
||||
struct fb_info *info = par->vmw_priv->fb_info;
|
||||
unsigned long flags;
|
||||
unsigned x2 = x1 + width;
|
||||
unsigned y2 = y1 + height;
|
||||
|
||||
spin_lock_irqsave(&par->dirty.lock, flags);
|
||||
if (par->dirty.x1 == par->dirty.x2) {
|
||||
par->dirty.x1 = x1;
|
||||
par->dirty.y1 = y1;
|
||||
par->dirty.x2 = x2;
|
||||
par->dirty.y2 = y2;
|
||||
/* if we are active start the dirty work
|
||||
* we share the work with the defio system */
|
||||
if (par->dirty.active)
|
||||
schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
|
||||
} else {
|
||||
if (x1 < par->dirty.x1)
|
||||
par->dirty.x1 = x1;
|
||||
if (y1 < par->dirty.y1)
|
||||
par->dirty.y1 = y1;
|
||||
if (x2 > par->dirty.x2)
|
||||
par->dirty.x2 = x2;
|
||||
if (y2 > par->dirty.y2)
|
||||
par->dirty.y2 = y2;
|
||||
}
|
||||
spin_unlock_irqrestore(&par->dirty.lock, flags);
|
||||
}
|
||||
|
||||
/*
 * fb_deferred_io callback: translate the list of mmap pages touched
 * by userspace into a dirty band and flush it.
 *
 * Page indices only give byte offsets into the shadow buffer, so the
 * dirty range is rounded to whole lines: x spans the full width,
 * y1..y2 cover every line any touched page overlaps. Finishes with a
 * direct flush to VRAM/device.
 * (Note: "min"/"max" locals deliberately shadow-feed the kernel
 * min()/max() macros — macro expansion keeps this legal.)
 */
static void vmw_deferred_io(struct fb_info *info,
			    struct list_head *pagelist)
{
	struct vmw_fb_par *par = info->par;
	unsigned long start, end, min, max;
	unsigned long flags;
	struct page *page;
	int y1, y2;

	min = ULONG_MAX;
	max = 0;
	list_for_each_entry(page, pagelist, lru) {
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		min = min(min, start);
		max = max(max, end);
	}

	if (min < max) {
		y1 = min / info->fix.line_length;
		y2 = (max / info->fix.line_length) + 1;

		spin_lock_irqsave(&par->dirty.lock, flags);
		par->dirty.x1 = 0;
		par->dirty.y1 = y1;
		par->dirty.x2 = info->var.xres;
		par->dirty.y2 = y2;
		spin_unlock_irqrestore(&par->dirty.lock, flags);
	}

	vmw_fb_dirty_flush(par);
};
|
||||
|
||||
/* Deferred-I/O descriptor: flush mmap writes after VMW_DIRTY_DELAY. */
struct fb_deferred_io vmw_defio = {
	.delay = VMW_DIRTY_DELAY,
	.deferred_io = vmw_deferred_io,
};
|
||||
|
||||
/*
|
||||
* Draw code
|
||||
*/
|
||||
|
||||
/* fb_fillrect hook: draw via the cfb helper, then mark the area dirty. */
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	cfb_fillrect(info, rect);
	vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
			  rect->width, rect->height);
}
|
||||
|
||||
/* fb_copyarea hook: copy via the cfb helper, then mark the destination dirty. */
static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
	cfb_copyarea(info, region);
	vmw_fb_dirty_mark(info->par, region->dx, region->dy,
			  region->width, region->height);
}
|
||||
|
||||
/* fb_imageblit hook: blit via the cfb helper, then mark the area dirty. */
static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
	cfb_imageblit(info, image);
	vmw_fb_dirty_mark(info->par, image->dx, image->dy,
			  image->width, image->height);
}
|
||||
|
||||
/*
|
||||
* Bring up code
|
||||
*/
|
||||
|
||||
/*
 * fbdev entry points. The drawing ops wrap the generic cfb helpers
 * with dirty-rectangle tracking so changes reach the device.
 */
static struct fb_ops vmw_fb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = vmw_fb_check_var,
	.fb_set_par = vmw_fb_set_par,
	.fb_setcolreg = vmw_fb_setcolreg,
	.fb_fillrect = vmw_fb_fillrect,
	.fb_copyarea = vmw_fb_copyarea,
	.fb_imageblit = vmw_fb_imageblit,
	.fb_pan_display = vmw_fb_pan_display,
	.fb_blank = vmw_fb_blank,
};
|
||||
|
||||
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
|
||||
size_t size, struct vmw_dma_buffer **out)
|
||||
{
|
||||
struct vmw_dma_buffer *vmw_bo;
|
||||
struct ttm_placement ne_placement = vmw_vram_ne_placement;
|
||||
int ret;
|
||||
|
||||
ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
|
||||
/* interuptable? */
|
||||
ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
|
||||
if (!vmw_bo)
|
||||
goto err_unlock;
|
||||
|
||||
ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
|
||||
&ne_placement,
|
||||
false,
|
||||
&vmw_dmabuf_bo_free);
|
||||
if (unlikely(ret != 0))
|
||||
goto err_unlock; /* init frees the buffer on failure */
|
||||
|
||||
*out = vmw_bo;
|
||||
|
||||
ttm_write_unlock(&vmw_priv->fbdev_master.lock);
|
||||
|
||||
return 0;
|
||||
|
||||
err_unlock:
|
||||
ttm_write_unlock(&vmw_priv->fbdev_master.lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Create and register the fbdev emulation for the device.
 *
 * Sets up a 32 bpp / depth-24 mode: a vmalloc shadow buffer that
 * fbcon/userspace draws into, and a pinned VRAM buffer object the
 * device scans out of (kept mapped via par->map). Wires up deferred
 * I/O for dirty tracking and registers the framebuffer. On any
 * failure everything is unwound and vmw_priv->fb_info is reset to
 * NULL. Returns 0 or a negative errno.
 */
int vmw_fb_init(struct vmw_private *vmw_priv)
{
	struct device *device = &vmw_priv->dev->pdev->dev;
	struct vmw_fb_par *par;
	struct fb_info *info;
	unsigned initial_width, initial_height;
	unsigned fb_width, fb_height;
	unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
	int ret;

	fb_bpp = 32;
	fb_depth = 24;

	/* XXX shouldn't these be clamped against real device limits as
	 * well? (original comment here was garbled) */
	fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
	fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);

	initial_width = min(vmw_priv->initial_width, fb_width);
	initial_height = min(vmw_priv->initial_height, fb_height);

	fb_pitch = fb_width * fb_bpp / 8;
	fb_size = fb_pitch * fb_height;
	/* NOTE(review): fb_offset is read but never used below. */
	fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);

	info = framebuffer_alloc(sizeof(*par), device);
	if (!info)
		return -ENOMEM;

	/*
	 * Par
	 */
	vmw_priv->fb_info = info;
	par = info->par;
	par->vmw_priv = vmw_priv;
	par->depth = fb_depth;
	par->bpp = fb_bpp;
	par->vmalloc = NULL;
	par->max_width = fb_width;
	par->max_height = fb_height;

	/*
	 * Create buffers and alloc memory
	 */
	par->vmalloc = vmalloc(fb_size);
	if (unlikely(par->vmalloc == NULL)) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
	if (unlikely(ret != 0))
		goto err_free;

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	if (unlikely(ret != 0))
		goto err_unref;
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
	par->bo_size = fb_size;

	/*
	 * Fixed and var
	 */
	strcpy(info->fix.id, "svgadrmfb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.line_length = fb_pitch;

	info->fix.smem_start = 0;
	info->fix.smem_len = fb_size;

	info->pseudo_palette = par->pseudo_palette;
	/* Userspace sees the shadow buffer, not VRAM. */
	info->screen_base = par->vmalloc;
	info->screen_size = fb_size;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &vmw_fb_ops;

	/* 24 depth per default */
	info->var.red.offset = 16;
	info->var.green.offset = 8;
	info->var.blue.offset = 0;
	info->var.red.length = 8;
	info->var.green.length = 8;
	info->var.blue.length = 8;
	info->var.transp.offset = 0;
	info->var.transp.length = 0;

	info->var.xres_virtual = fb_width;
	info->var.yres_virtual = fb_height;
	info->var.bits_per_pixel = par->bpp;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;
	info->var.width = -1;

	info->var.xres = initial_width;
	info->var.yres = initial_height;

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	info->apertures = alloc_apertures(1);
	if (!info->apertures) {
		ret = -ENOMEM;
		goto err_aper;
	}
	info->apertures->ranges[0].base = vmw_priv->vram_start;
	info->apertures->ranges[0].size = vmw_priv->vram_size;

	/*
	 * Dirty & Deferred IO
	 */
	par->dirty.x1 = par->dirty.x2 = 0;
	par->dirty.y1 = par->dirty.y2 = 0;
	par->dirty.active = true;
	spin_lock_init(&par->dirty.lock);
	info->fbdefio = &vmw_defio;
	fb_deferred_io_init(info);

	ret = register_framebuffer(info);
	if (unlikely(ret != 0))
		goto err_defio;

	return 0;

err_defio:
	fb_deferred_io_cleanup(info);
err_aper:
	ttm_bo_kunmap(&par->map);
err_unref:
	ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
	vfree(par->vmalloc);
	framebuffer_release(info);
	vmw_priv->fb_info = NULL;

	return ret;
}
|
||||
|
||||
/*
 * Tear down the fbdev emulation: unregister the framebuffer, drop the
 * VRAM buffer object mapping and reference, and free the shadow
 * buffer. A no-op (returning 0) when fbdev was never initialized.
 */
int vmw_fb_close(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	struct ttm_buffer_object *bo;

	if (!vmw_priv->fb_info)
		return 0;

	info = vmw_priv->fb_info;
	par = info->par;
	bo = &par->vmw_bo->base;
	par->vmw_bo = NULL;

	/* NOTE(review): original author flagged this ordering with
	 * "??? order" — deferred-io cleanup before unregister; confirm. */
	fb_deferred_io_cleanup(info);
	unregister_framebuffer(info);

	ttm_bo_kunmap(&par->map);
	ttm_bo_unref(&bo);

	vfree(par->vmalloc);
	framebuffer_release(info);

	return 0;
}
|
||||
|
||||
/*
 * Stop fbdev scanout so KMS can take over the device: disable dirty
 * tracking, flush any in-flight deferred-I/O work, then unmap and
 * unpin the VRAM buffer object. Reversed by vmw_fb_on().
 * Returns -EINVAL when fbdev was never initialized.
 */
int vmw_fb_off(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* Stop new flushes before tearing down the mapping. */
	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = false;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

	flush_delayed_work(&info->deferred_work);

	/* bo_ptr == NULL doubles as the "fbdev inactive" marker. */
	par->bo_ptr = NULL;
	ttm_bo_kunmap(&par->map);

	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);

	return 0;
}
|
||||
|
||||
/*
 * Resume fbdev scanout after KMS releases the device: stop all
 * overlays, pin the framebuffer buffer object back at the start of
 * VRAM, remap it, re-enable dirty tracking, reprogram the mode and
 * schedule an immediate full-screen flush.
 *
 * NOTE(review): if pinning fails this deliberately falls through to
 * err_no_buffer, still reprograms the mode and returns 0 — the error
 * is only logged.
 */
int vmw_fb_on(struct vmw_private *vmw_priv)
{
	struct fb_info *info;
	struct vmw_fb_par *par;
	unsigned long flags;
	bool dummy;
	int ret;

	if (!vmw_priv->fb_info)
		return -EINVAL;

	info = vmw_priv->fb_info;
	par = info->par;

	/* we are already active */
	if (par->bo_ptr != NULL)
		return 0;

	/* Make sure that all overlays are stopped when we take over */
	vmw_overlay_stop_all(vmw_priv);

	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("could not move buffer to start of VRAM\n");
		goto err_no_buffer;
	}

	ret = ttm_bo_kmap(&par->vmw_bo->base,
			  0,
			  par->vmw_bo->base.num_pages,
			  &par->map);
	BUG_ON(ret != 0);
	par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);

	spin_lock_irqsave(&par->dirty.lock, flags);
	par->dirty.active = true;
	spin_unlock_irqrestore(&par->dirty.lock, flags);

err_no_buffer:
	vmw_fb_set_par(info);

	vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);

	/* If there already was stuff dirty we won't
	 * schedule a new work, so let's do it now */
	schedule_delayed_work(&info->deferred_work, 0);

	return 0;
}
|
1154
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
Normal file
1154
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
Normal file
File diff suppressed because it is too large
Load diff
120
drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
Normal file
120
drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
Normal file
|
@ -0,0 +1,120 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#ifndef _VMWGFX_FENCE_H_
|
||||
|
||||
#define VMW_FENCE_WAIT_TIMEOUT (5*HZ)
|
||||
|
||||
struct vmw_private;
|
||||
|
||||
struct vmw_fence_manager;
|
||||
|
||||
/**
|
||||
*
|
||||
*
|
||||
*/
|
||||
enum vmw_action_type {
|
||||
VMW_ACTION_EVENT = 0,
|
||||
VMW_ACTION_MAX
|
||||
};
|
||||
|
||||
struct vmw_fence_action {
|
||||
struct list_head head;
|
||||
enum vmw_action_type type;
|
||||
void (*seq_passed) (struct vmw_fence_action *action);
|
||||
void (*cleanup) (struct vmw_fence_action *action);
|
||||
};
|
||||
|
||||
struct vmw_fence_obj {
|
||||
struct kref kref;
|
||||
u32 seqno;
|
||||
|
||||
struct vmw_fence_manager *fman;
|
||||
struct list_head head;
|
||||
uint32_t signaled;
|
||||
uint32_t signal_mask;
|
||||
struct list_head seq_passed_actions;
|
||||
void (*destroy)(struct vmw_fence_obj *fence);
|
||||
wait_queue_head_t queue;
|
||||
};
|
||||
|
||||
extern struct vmw_fence_manager *
|
||||
vmw_fence_manager_init(struct vmw_private *dev_priv);
|
||||
|
||||
extern void vmw_fence_manager_takedown(struct vmw_fence_manager *fman);
|
||||
|
||||
extern void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p);
|
||||
|
||||
extern struct vmw_fence_obj *
|
||||
vmw_fence_obj_reference(struct vmw_fence_obj *fence);
|
||||
|
||||
extern void vmw_fences_update(struct vmw_fence_manager *fman);
|
||||
|
||||
extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
|
||||
uint32_t flags);
|
||||
|
||||
extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence, uint32_t flags,
|
||||
bool lazy,
|
||||
bool interruptible, unsigned long timeout);
|
||||
|
||||
extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
|
||||
|
||||
extern int vmw_fence_create(struct vmw_fence_manager *fman,
|
||||
uint32_t seqno,
|
||||
uint32_t mask,
|
||||
struct vmw_fence_obj **p_fence);
|
||||
|
||||
extern int vmw_user_fence_create(struct drm_file *file_priv,
|
||||
struct vmw_fence_manager *fman,
|
||||
uint32_t sequence,
|
||||
uint32_t mask,
|
||||
struct vmw_fence_obj **p_fence,
|
||||
uint32_t *p_handle);
|
||||
|
||||
extern void vmw_fence_fifo_up(struct vmw_fence_manager *fman);
|
||||
|
||||
extern void vmw_fence_fifo_down(struct vmw_fence_manager *fman);
|
||||
|
||||
extern int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
|
||||
extern int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
|
||||
extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
|
||||
struct list_head *event_list);
|
||||
extern int vmw_event_fence_action_queue(struct drm_file *filee_priv,
|
||||
struct vmw_fence_obj *fence,
|
||||
struct drm_pending_event *event,
|
||||
uint32_t *tv_sec,
|
||||
uint32_t *tv_usec,
|
||||
bool interruptible);
|
||||
#endif /* _VMWGFX_FENCE_H_ */
|
569
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
Normal file
569
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
Normal file
|
@ -0,0 +1,569 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
|
||||
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
|
||||
{
|
||||
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
|
||||
uint32_t fifo_min, hwversion;
|
||||
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
|
||||
|
||||
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
|
||||
return false;
|
||||
|
||||
fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
|
||||
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
|
||||
return false;
|
||||
|
||||
hwversion = ioread32(fifo_mem +
|
||||
((fifo->capabilities &
|
||||
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
|
||||
SVGA_FIFO_3D_HWVERSION_REVISED :
|
||||
SVGA_FIFO_3D_HWVERSION));
|
||||
|
||||
if (hwversion == 0)
|
||||
return false;
|
||||
|
||||
if (hwversion < SVGA3D_HWVERSION_WS8_B1)
|
||||
return false;
|
||||
|
||||
/* Non-Screen Object path does not support surfaces */
|
||||
if (!dev_priv->sou_priv)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
|
||||
{
|
||||
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
|
||||
uint32_t caps;
|
||||
|
||||
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
|
||||
return false;
|
||||
|
||||
caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
|
||||
if (caps & SVGA_FIFO_CAP_PITCHLOCK)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
 * vmw_fifo_init - Bring up the device command FIFO.
 *
 * Allocates the static bounce buffer, saves the current device register
 * state (restored by vmw_fifo_release()), enables the device, programs
 * the FIFO layout registers and emits an initial fence.
 *
 * Returns 0 on success, -ENOMEM if the bounce buffer cannot be
 * allocated, or the error from vmw_fifo_send_fence().
 */
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	/* Save register state so vmw_fifo_release() can restore it. */
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	/* FIFO data area starts after the register area (MEM_REGS words). */
	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	/* MIN/MAX must be visible to the device before the queue pointers. */
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	/* Read back the layout the device actually accepted. */
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);
	/* Emit an initial fence so the seqno machinery has a baseline. */
	return vmw_fifo_send_fence(dev_priv, &dummy);
}
|
||||
|
||||
/*
 * vmw_fifo_ping_host - Ask the device to process queued FIFO commands.
 *
 * Only writes the sync register when the device is not already marked
 * busy on the FIFO, avoiding redundant device wakeups.
 */
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		/* Mark busy before the sync write so the flag is set
		 * by the time the device starts processing. */
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}

	mutex_unlock(&dev_priv->hw_mutex);
}
|
||||
|
||||
/*
 * vmw_fifo_release - Tear down the command FIFO.
 *
 * Drains outstanding device work, records the last read seqno, restores
 * the register state saved by vmw_fifo_init(), and frees the bounce
 * buffers.
 */
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	/* Busy-wait until the device has consumed all pending commands. */
	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	/* Restore the register state saved in vmw_fifo_init(). */
	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	mutex_unlock(&dev_priv->hw_mutex);
	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}
|
||||
|
||||
/*
 * vmw_fifo_is_full - Is there not enough room for @bytes in the FIFO?
 *
 * Free space is the gap from NEXT_CMD to the end of the ring plus the
 * gap from the start of the ring to STOP.
 */
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
	uint32_t free_bytes = (fifo_max - next_cmd) + (stop - fifo_min);

	return free_bytes <= bytes;
}
|
||||
|
||||
/*
 * vmw_fifo_wait_noirq - Wait for FIFO space without interrupt support.
 *
 * Polling fallback used when the device lacks SVGA_CAP_IRQMASK: sleeps
 * one tick at a time on the fifo queue until enough space is free, the
 * timeout expires (-EBUSY), or — for interruptible waits — a signal
 * arrives (-ERESTARTSYS). Returns 0 on success.
 */
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		/* Register on the wait queue before re-checking space to
		 * avoid losing a wakeup between check and sleep. */
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	/* Space may now be available for other waiters as well. */
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}
|
||||
|
||||
/*
 * vmw_fifo_wait - Wait until at least @bytes of FIFO space is free.
 *
 * Fast path returns immediately when space is available. Otherwise the
 * host is pinged, and we either poll (no IRQ support) or unmask the
 * FIFO-progress interrupt and block on the fifo wait queue.
 *
 * Returns 0 on success, -EBUSY on timeout, -ERESTARTSYS when an
 * interruptible wait was broken by a signal.
 */
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	/* First waiter unmasks the FIFO-progress interrupt. */
	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		/* Ack any stale pending status before unmasking. */
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	/* Map wait_event results to 0 / -EBUSY; <0 (signal) passes through. */
	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	/* Last waiter masks the interrupt again. */
	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}
|
||||
|
||||
/**
|
||||
* Reserve @bytes number of bytes in the fifo.
|
||||
*
|
||||
* This function will return NULL (error) on two conditions:
|
||||
* If it timeouts waiting for fifo space, or if @bytes is larger than the
|
||||
* available fifo space.
|
||||
*
|
||||
* Returns:
|
||||
* Pointer to the fifo, or null on error (possible hardware hang).
|
||||
*/
|
||||
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
|
||||
{
|
||||
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
|
||||
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
|
||||
uint32_t max;
|
||||
uint32_t min;
|
||||
uint32_t next_cmd;
|
||||
uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&fifo_state->fifo_mutex);
|
||||
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
|
||||
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
|
||||
next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
|
||||
|
||||
if (unlikely(bytes >= (max - min)))
|
||||
goto out_err;
|
||||
|
||||
BUG_ON(fifo_state->reserved_size != 0);
|
||||
BUG_ON(fifo_state->dynamic_buffer != NULL);
|
||||
|
||||
fifo_state->reserved_size = bytes;
|
||||
|
||||
while (1) {
|
||||
uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
|
||||
bool need_bounce = false;
|
||||
bool reserve_in_place = false;
|
||||
|
||||
if (next_cmd >= stop) {
|
||||
if (likely((next_cmd + bytes < max ||
|
||||
(next_cmd + bytes == max && stop > min))))
|
||||
reserve_in_place = true;
|
||||
|
||||
else if (vmw_fifo_is_full(dev_priv, bytes)) {
|
||||
ret = vmw_fifo_wait(dev_priv, bytes,
|
||||
false, 3 * HZ);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_err;
|
||||
} else
|
||||
need_bounce = true;
|
||||
|
||||
} else {
|
||||
|
||||
if (likely((next_cmd + bytes < stop)))
|
||||
reserve_in_place = true;
|
||||
else {
|
||||
ret = vmw_fifo_wait(dev_priv, bytes,
|
||||
false, 3 * HZ);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_err;
|
||||
}
|
||||
}
|
||||
|
||||
if (reserve_in_place) {
|
||||
if (reserveable || bytes <= sizeof(uint32_t)) {
|
||||
fifo_state->using_bounce_buffer = false;
|
||||
|
||||
if (reserveable)
|
||||
iowrite32(bytes, fifo_mem +
|
||||
SVGA_FIFO_RESERVED);
|
||||
return fifo_mem + (next_cmd >> 2);
|
||||
} else {
|
||||
need_bounce = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (need_bounce) {
|
||||
fifo_state->using_bounce_buffer = true;
|
||||
if (bytes < fifo_state->static_buffer_size)
|
||||
return fifo_state->static_buffer;
|
||||
else {
|
||||
fifo_state->dynamic_buffer = vmalloc(bytes);
|
||||
return fifo_state->dynamic_buffer;
|
||||
}
|
||||
}
|
||||
}
|
||||
out_err:
|
||||
fifo_state->reserved_size = 0;
|
||||
mutex_unlock(&fifo_state->fifo_mutex);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
 * vmw_fifo_res_copy - Copy a bounce buffer into the FIFO using the
 * RESERVED mechanism, splitting the copy at the ring wrap point.
 *
 * Used only when SVGA_FIFO_CAP_RESERVE is available; the reservation is
 * published before the data so the device treats the region as one
 * atomic command block.
 */
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	/* Publish the reservation before the data becomes visible. */
	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	/* Wrap the remainder to the start of the ring. */
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}
|
||||
|
||||
/*
 * vmw_fifo_slow_copy - Copy a bounce buffer into the FIFO one 32-bit
 * word at a time, publishing NEXT_CMD after every word.
 *
 * Fallback for devices without SVGA_FIFO_CAP_RESERVE, where each word
 * must be made visible individually.
 */
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
		fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		/* Wrap at the end of the ring. */
		if (unlikely(next_cmd == max))
			next_cmd = min;
		/* Order the data write before the NEXT_CMD update. */
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}
|
||||
|
||||
/*
 * vmw_fifo_commit - Commit @bytes previously reserved with
 * vmw_fifo_reserve() and release the fifo_mutex.
 *
 * If a bounce buffer was handed out, its contents are copied into the
 * FIFO first. NEXT_CMD is then advanced (with wrap handling), the
 * reservation is cleared and the host is pinged.
 */
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	/* Commands are whole 32-bit words, within the reservation. */
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	/* Slow (non-reserveable, in-place) copies already advanced NEXT_CMD. */
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		/* Wrap NEXT_CMD back into [min, max). */
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}
|
||||
|
||||
/*
 * vmw_fifo_send_fence - Emit a fence command and return its seqno.
 *
 * Seqno 0 is never handed out (reserved as "no fence"). If FIFO space
 * cannot be reserved, falls back to vmw_fallback_wait() on the current
 * marker seqno and returns -ENOMEM; *seqno is always set.
 */
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	/* One command-id word followed by the fence payload. */
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		/* Commit zero bytes just to release the reservation. */
		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*seqno, &cmd_fence->fence);
	vmw_fifo_commit(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}
|
||||
|
||||
/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A Query results structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object. And that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	/* Address the result slot by VRAM offset or by GMR id, depending
	 * on where the dummy query bo currently resides. */
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
|
300
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
Normal file
300
drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
Normal file
|
@ -0,0 +1,300 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009-2011 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
|
||||
#define VMW_PPN_SIZE (sizeof(unsigned long))
|
||||
/* A future safe maximum remap size. */
|
||||
#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
|
||||
|
||||
/*
 * vmw_gmr2_bind - Bind pages to a GMR via the GMR2 command interface.
 *
 * Emits one DEFINE_GMR2 followed by as many REMAP_GMR2 commands as
 * needed — each carrying at most VMW_PPN_PER_REMAP page numbers — with
 * the pfn list inlined after each remap header. The whole sequence is
 * reserved and committed as a single FIFO transaction.
 *
 * Returns 0 on success, -ENOMEM if the FIFO reservation fails.
 */
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
			 struct page *pages[],
			 unsigned long num_pages,
			 int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	SVGAFifoCmdRemapGMR2 remap_cmd;
	uint32_t *cmd;
	uint32_t *cmd_orig;
	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
	/* Ceiling division: number of REMAP commands required. */
	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
	uint32_t remap_pos = 0;
	uint32_t cmd_size = define_size + remap_size;
	uint32_t i;

	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = num_pages;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));
	cmd += sizeof(define_cmd) / sizeof(*cmd);

	/*
	 * Need to split the command if there are too many
	 * pages that goes into the gmr.
	 */

	remap_cmd.gmrId = gmr_id;
	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;

	while (num_pages > 0) {
		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);

		remap_cmd.offsetPages = remap_pos;
		remap_cmd.numPages = nr;

		*cmd++ = SVGA_CMD_REMAP_GMR2;
		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
		cmd += sizeof(remap_cmd) / sizeof(*cmd);

		for (i = 0; i < nr; ++i) {
			/* Store the pfn as 32 or 64 bits per the PPN size. */
			if (VMW_PPN_SIZE <= 4)
				*cmd = page_to_pfn(*pages++);
			else
				*((uint64_t *)cmd) = page_to_pfn(*pages++);

			cmd += VMW_PPN_SIZE / sizeof(*cmd);
		}

		num_pages -= nr;
		remap_pos += nr;
	}

	/* We must have filled exactly what was reserved. */
	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}
|
||||
|
||||
/*
 * vmw_gmr2_unbind - Unbind a GMR2 region by redefining it with zero pages.
 *
 * Failure to reserve FIFO space is only logged; there is nothing the
 * caller could do about it here.
 */
static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
			    int gmr_id)
{
	SVGAFifoCmdDefineGMR2 define_cmd;
	uint32_t define_size = sizeof(define_cmd) + 4; /* + command id word */
	uint32_t *cmd;

	cmd = vmw_fifo_reserve(dev_priv, define_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("GMR2 unbind failed.\n");
		return;
	}
	/* numPages == 0 releases the region on the device side. */
	define_cmd.gmrId = gmr_id;
	define_cmd.numPages = 0;

	*cmd++ = SVGA_CMD_DEFINE_GMR2;
	memcpy(cmd, &define_cmd, sizeof(define_cmd));

	vmw_fifo_commit(dev_priv, define_size);
}
|
||||
|
||||
/**
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 *
 * Builds a chain of guest memory descriptor pages describing @pages.
 * Runs of physically contiguous pfns are coalesced into single
 * descriptors. The last descriptor slot of each page is reserved to
 * link to the next descriptor page; a zero descriptor terminates the
 * chain. On failure every descriptor page queued on @desc_pages is
 * freed and a negative errno is returned.
 */
static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
				     struct page *pages[],
				     unsigned long num_pages)
{
	struct page *page, *next;
	struct svga_guest_mem_descriptor *page_virtual = NULL;
	struct svga_guest_mem_descriptor *desc_virtual = NULL;
	unsigned int desc_per_page;
	unsigned long prev_pfn;
	unsigned long pfn;
	int ret;

	/* Keep one slot free per page for the link/terminator descriptor. */
	desc_per_page = PAGE_SIZE /
	    sizeof(struct svga_guest_mem_descriptor) - 1;

	while (likely(num_pages != 0)) {
		page = alloc_page(__GFP_HIGHMEM);
		if (unlikely(page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}

		list_add_tail(&page->lru, desc_pages);

		/*
		 * Point previous page terminating descriptor to this
		 * page before unmapping it.
		 */

		if (likely(page_virtual != NULL)) {
			desc_virtual->ppn = page_to_pfn(page);
			kunmap_atomic(page_virtual);
		}

		page_virtual = kmap_atomic(page);
		/* Pre-decrement form: the first store targets slot 0. */
		desc_virtual = page_virtual - 1;
		prev_pfn = ~(0UL);

		while (likely(num_pages != 0)) {
			pfn = page_to_pfn(*pages);

			if (pfn != prev_pfn + 1) {
				/* Start a new descriptor; spill to a new
				 * page if only the link slot remains. */
				if (desc_virtual - page_virtual ==
				    desc_per_page - 1)
					break;

				(++desc_virtual)->ppn = cpu_to_le32(pfn);
				desc_virtual->num_pages = cpu_to_le32(1);
			} else {
				/* Contiguous with the previous page:
				 * extend the current run. */
				uint32_t tmp =
				    le32_to_cpu(desc_virtual->num_pages);
				desc_virtual->num_pages = cpu_to_le32(tmp + 1);
			}
			prev_pfn = pfn;
			--num_pages;
			++pages;
		}

		/* Terminator; its ppn is patched if the chain continues. */
		(++desc_virtual)->ppn = cpu_to_le32(0);
		desc_virtual->num_pages = cpu_to_le32(0);
	}

	if (likely(page_virtual != NULL))
		kunmap_atomic(page_virtual);

	return 0;
out_err:
	list_for_each_entry_safe(page, next, desc_pages, lru) {
		list_del_init(&page->lru);
		__free_page(page);
	}
	return ret;
}
|
||||
|
||||
static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
|
||||
{
|
||||
struct page *page, *next;
|
||||
|
||||
list_for_each_entry_safe(page, next, desc_pages, lru) {
|
||||
list_del_init(&page->lru);
|
||||
__free_page(page);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * vmw_gmr_fire_descriptors - Hand a descriptor chain to the device.
 *
 * Programs SVGA_REG_GMR_ID and then points SVGA_REG_GMR_DESCRIPTOR at
 * the first descriptor page; the barriers keep the two register writes
 * ordered. A no-op for an empty chain.
 */
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
				     int gmr_id, struct list_head *desc_pages)
{
	struct page *page;

	if (unlikely(list_empty(desc_pages)))
		return;

	page = list_entry(desc_pages->next, struct page, lru);

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
	mb();

	mutex_unlock(&dev_priv->hw_mutex);

}
|
||||
|
||||
/**
 * FIXME: Adjust to the ttm lowmem / highmem storage to minimize
 * the number of used descriptors.
 *
 * vmw_gmr_count_descriptors - Number of guest memory descriptors needed
 * to describe @pages. Physically contiguous pfn runs share a single
 * descriptor, so the result equals the number of breaks in contiguity.
 */
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
					       unsigned long num_pages)
{
	unsigned long count = 0;
	unsigned long last_pfn = ~(0UL);
	unsigned long i;

	for (i = 0; i < num_pages; ++i) {
		unsigned long cur_pfn = page_to_pfn(pages[i]);

		if (cur_pfn != last_pfn + 1)
			++count;
		last_pfn = cur_pfn;
	}

	return count;
}
|
||||
|
||||
/*
 * vmw_gmr_bind - Bind an array of pages to GMR @gmr_id.
 *
 * On GMR2-capable devices the work is delegated to vmw_gmr2_bind().
 * Otherwise a legacy descriptor chain is built, fired at the device and
 * then freed again (the device has consumed it at that point).
 *
 * Returns 0 on success, -EINVAL if the device lacks GMR support or the
 * page array would need more descriptors than the device allows, or a
 * negative error code from descriptor construction.
 */
int vmw_gmr_bind(struct vmw_private *dev_priv,
		 struct page *pages[],
		 unsigned long num_pages,
		 int gmr_id)
{
	struct list_head desc_pages;
	int ret;

	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
		return vmw_gmr2_bind(dev_priv, pages, num_pages, gmr_id);

	if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
		return -EINVAL;

	/* Refuse up front rather than build a chain the device can't take. */
	if (vmw_gmr_count_descriptors(pages, num_pages) >
	    dev_priv->max_gmr_descriptors)
		return -EINVAL;

	INIT_LIST_HEAD(&desc_pages);

	ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
	if (unlikely(ret != 0))
		return ret;

	vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
	vmw_gmr_free_descriptors(&desc_pages);

	return 0;
}
|
||||
|
||||
|
||||
/*
 * vmw_gmr_unbind - Release GMR @gmr_id on the device.
 *
 * GMR2-capable devices delegate to vmw_gmr2_unbind(); legacy devices are
 * unbound by writing a zero descriptor for the id.
 */
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
	if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
		vmw_gmr2_unbind(dev_priv, gmr_id);
		return;
	}

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
	/* The id write must reach the device before the descriptor write. */
	wmb();
	vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
	mb();
	mutex_unlock(&dev_priv->hw_mutex);
}
|
161
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
Normal file
161
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
Normal file
|
@ -0,0 +1,161 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include <drm/ttm/ttm_module.h>
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
#include <linux/idr.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
/*
 * Private state of the GMR id memory-type manager.
 */
struct vmwgfx_gmrid_man {
	spinlock_t lock;		/* protects all fields below */
	struct ida gmr_ida;		/* allocator for GMR ids */
	uint32_t max_gmr_ids;		/* ids are allocated in [0, max_gmr_ids) */
	uint32_t max_gmr_pages;		/* page quota; 0 presumably means no limit -- confirm at init site */
	uint32_t used_gmr_pages;	/* pages currently accounted against the quota */
};
|
||||
|
||||
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
|
||||
struct ttm_buffer_object *bo,
|
||||
struct ttm_placement *placement,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct vmwgfx_gmrid_man *gman =
|
||||
(struct vmwgfx_gmrid_man *)man->priv;
|
||||
int ret = 0;
|
||||
int id;
|
||||
|
||||
mem->mm_node = NULL;
|
||||
|
||||
spin_lock(&gman->lock);
|
||||
|
||||
if (gman->max_gmr_pages > 0) {
|
||||
gman->used_gmr_pages += bo->num_pages;
|
||||
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
|
||||
goto out_err_locked;
|
||||
}
|
||||
|
||||
do {
|
||||
spin_unlock(&gman->lock);
|
||||
if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
|
||||
ret = -ENOMEM;
|
||||
goto out_err;
|
||||
}
|
||||
spin_lock(&gman->lock);
|
||||
|
||||
ret = ida_get_new(&gman->gmr_ida, &id);
|
||||
if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
|
||||
ida_remove(&gman->gmr_ida, id);
|
||||
ret = 0;
|
||||
goto out_err_locked;
|
||||
}
|
||||
} while (ret == -EAGAIN);
|
||||
|
||||
if (likely(ret == 0)) {
|
||||
mem->mm_node = gman;
|
||||
mem->start = id;
|
||||
mem->num_pages = bo->num_pages;
|
||||
} else
|
||||
goto out_err_locked;
|
||||
|
||||
spin_unlock(&gman->lock);
|
||||
return 0;
|
||||
|
||||
out_err:
|
||||
spin_lock(&gman->lock);
|
||||
out_err_locked:
|
||||
gman->used_gmr_pages -= bo->num_pages;
|
||||
spin_unlock(&gman->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct vmwgfx_gmrid_man *gman =
|
||||
(struct vmwgfx_gmrid_man *)man->priv;
|
||||
|
||||
if (mem->mm_node) {
|
||||
spin_lock(&gman->lock);
|
||||
ida_remove(&gman->gmr_ida, mem->start);
|
||||
gman->used_gmr_pages -= mem->num_pages;
|
||||
spin_unlock(&gman->lock);
|
||||
mem->mm_node = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * vmw_gmrid_man_init - Set up the GMR id manager for a TTM memory type.
 *
 * @p_size is the number of available GMR ids; the page quota is taken
 * from the device-private max_gmr_pages. Returns 0 or -ENOMEM.
 */
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
			      unsigned long p_size)
{
	struct vmw_private *dev_priv =
		container_of(man->bdev, struct vmw_private, bdev);
	struct vmwgfx_gmrid_man *gman =
		kzalloc(sizeof(*gman), GFP_KERNEL);

	if (unlikely(gman == NULL))
		return -ENOMEM;

	spin_lock_init(&gman->lock);
	gman->max_gmr_pages = dev_priv->max_gmr_pages;
	gman->used_gmr_pages = 0;
	ida_init(&gman->gmr_ida);
	gman->max_gmr_ids = p_size;
	man->priv = (void *) gman;
	return 0;
}
|
||||
|
||||
static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
|
||||
{
|
||||
struct vmwgfx_gmrid_man *gman =
|
||||
(struct vmwgfx_gmrid_man *)man->priv;
|
||||
|
||||
if (gman) {
|
||||
ida_destroy(&gman->gmr_ida);
|
||||
kfree(gman);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
|
||||
const char *prefix)
|
||||
{
|
||||
printk(KERN_INFO "%s: No debug info available for the GMR "
|
||||
"id manager.\n", prefix);
|
||||
}
|
||||
|
||||
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
|
||||
vmw_gmrid_man_init,
|
||||
vmw_gmrid_man_takedown,
|
||||
vmw_gmrid_man_get_node,
|
||||
vmw_gmrid_man_put_node,
|
||||
vmw_gmrid_man_debug
|
||||
};
|
326
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
Normal file
326
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
Normal file
|
@ -0,0 +1,326 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include <drm/vmwgfx_drm.h>
|
||||
#include "vmwgfx_kms.h"
|
||||
|
||||
/*
 * vmw_getparam_ioctl - DRM_VMW_GET_PARAM handler.
 *
 * Fills in @param->value for the requested parameter id; returns
 * -EINVAL for an unknown id.
 */
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_getparam_arg *param =
	    (struct drm_vmw_getparam_arg *)data;

	switch (param->param) {
	case DRM_VMW_PARAM_NUM_STREAMS:
		param->value = vmw_overlay_num_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_NUM_FREE_STREAMS:
		param->value = vmw_overlay_num_free_overlays(dev_priv);
		break;
	case DRM_VMW_PARAM_3D:
		param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
		break;
	case DRM_VMW_PARAM_HW_CAPS:
		param->value = dev_priv->capabilities;
		break;
	case DRM_VMW_PARAM_FIFO_CAPS:
		param->value = dev_priv->fifo.capabilities;
		break;
	case DRM_VMW_PARAM_MAX_FB_SIZE:
		param->value = dev_priv->vram_size;
		break;
	case DRM_VMW_PARAM_FIFO_HW_VERSION:
	{
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		const struct vmw_fifo_state *fifo = &dev_priv->fifo;

		/* Prefer the revised hw-version register when advertised. */
		param->value =
			ioread32(fifo_mem +
				 ((fifo->capabilities &
				   SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				  SVGA_FIFO_3D_HWVERSION_REVISED :
				  SVGA_FIFO_3D_HWVERSION));
		break;
	}
	default:
		DRM_ERROR("Illegal vmwgfx get param request: %d\n",
			  param->param);
		return -EINVAL;
	}

	return 0;
}
|
||||
|
||||
|
||||
/*
 * vmw_get_cap_3d_ioctl - DRM_VMW_GET_3D_CAP handler.
 *
 * Copies the device's 3D capability block from fifo MMIO to user space
 * via a vmalloc'd bounce buffer, truncated to @arg->max_size if smaller.
 */
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_vmw_get_3d_cap_arg *arg =
		(struct drm_vmw_get_3d_cap_arg *) data;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t size;
	__le32 __iomem *fifo_mem;
	void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
	void *bounce;
	int ret;

	if (unlikely(arg->pad64 != 0)) {
		DRM_ERROR("Illegal GET_3D_CAP argument.\n");
		return -EINVAL;
	}

	/* Size in bytes of the fifo 3D caps register range. */
	size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2;

	if (arg->max_size < size)
		size = arg->max_size;

	bounce = vmalloc(size);
	if (unlikely(bounce == NULL)) {
		DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
		return -ENOMEM;
	}

	/* MMIO cannot be copy_to_user'd directly; bounce through RAM. */
	fifo_mem = dev_priv->mmio_virt;
	memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);

	ret = copy_to_user(buffer, bounce, size);
	if (ret)
		ret = -EFAULT;
	vfree(bounce);

	if (unlikely(ret != 0))
		DRM_ERROR("Failed to report 3D caps info.\n");

	return ret;
}
|
||||
|
||||
/*
 * vmw_present_ioctl - DRM_VMW_PRESENT handler.
 *
 * Copies the user-supplied clip rectangles, looks up the target
 * framebuffer and source surface, and hands off to vmw_kms_present().
 * Resources are released in reverse acquisition order via the goto
 * cleanup chain below.
 */
int vmw_present_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_arg *arg =
		(struct drm_vmw_present_arg *)data;
	struct vmw_surface *surface;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	struct vmw_resource *res;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;

	/* Nothing to present. */
	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Variable clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -EINVAL;
		goto out_no_fb;
	}
	vfb = vmw_framebuffer_to_vfb(fb);

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
					      user_surface_converter,
					      &res);
	if (ret)
		goto out_no_surface;

	surface = vmw_res_to_srf(res);
	ret = vmw_kms_present(dev_priv, file_priv,
			      vfb, surface, arg->sid,
			      arg->dest_x, arg->dest_y,
			      clips, num_clips);

	/* vmw_user_surface_lookup takes one ref so does new_fb */
	vmw_surface_unreference(&surface);

out_no_surface:
	ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}
|
||||
|
||||
/*
 * vmw_present_readback_ioctl - DRM_VMW_PRESENT_READBACK handler.
 *
 * Like vmw_present_ioctl() but in the opposite direction: reads the
 * given clip regions back from a dmabuf-backed framebuffer through
 * vmw_kms_readback(). Only dmabuf-backed framebuffers are accepted.
 */
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_present_readback_arg *arg =
		(struct drm_vmw_present_readback_arg *)data;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)
		(unsigned long)arg->fence_rep;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct drm_vmw_rect __user *clips_ptr;
	struct drm_vmw_rect *clips = NULL;
	struct drm_framebuffer *fb;
	struct vmw_framebuffer *vfb;
	uint32_t num_clips;
	int ret;

	num_clips = arg->num_clips;
	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;

	/* Nothing to read back. */
	if (unlikely(num_clips == 0))
		return 0;

	if (clips_ptr == NULL) {
		DRM_ERROR("Argument clips_ptr must be specified.\n");
		ret = -EINVAL;
		goto out_clips;
	}

	clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
	if (clips == NULL) {
		DRM_ERROR("Failed to allocate clip rect list.\n");
		ret = -ENOMEM;
		goto out_clips;
	}

	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
	if (ret) {
		DRM_ERROR("Failed to copy clip rects from userspace.\n");
		ret = -EFAULT;
		goto out_no_copy;
	}

	drm_modeset_lock_all(dev);

	fb = drm_framebuffer_lookup(dev, arg->fb_id);
	if (!fb) {
		DRM_ERROR("Invalid framebuffer id.\n");
		ret = -EINVAL;
		goto out_no_fb;
	}

	vfb = vmw_framebuffer_to_vfb(fb);
	if (!vfb->dmabuf) {
		DRM_ERROR("Framebuffer not dmabuf backed.\n");
		ret = -EINVAL;
		goto out_no_ttm_lock;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		goto out_no_ttm_lock;

	ret = vmw_kms_readback(dev_priv, file_priv,
			       vfb, user_fence_rep,
			       clips, num_clips);

	ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
	drm_framebuffer_unreference(fb);
out_no_fb:
	drm_modeset_unlock_all(dev);
out_no_copy:
	kfree(clips);
out_clips:
	return ret;
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_fops_poll - wrapper around the drm_poll function
|
||||
*
|
||||
* @filp: See the linux fops poll documentation.
|
||||
* @wait: See the linux fops poll documentation.
|
||||
*
|
||||
* Wrapper around the drm_poll function that makes sure the device is
|
||||
* processing the fifo if drm_poll decides to wait.
|
||||
*/
|
||||
unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
|
||||
{
|
||||
struct drm_file *file_priv = filp->private_data;
|
||||
struct vmw_private *dev_priv =
|
||||
vmw_priv(file_priv->minor->dev);
|
||||
|
||||
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
|
||||
return drm_poll(filp, wait);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_fops_read - wrapper around the drm_read function
|
||||
*
|
||||
* @filp: See the linux fops read documentation.
|
||||
* @buffer: See the linux fops read documentation.
|
||||
* @count: See the linux fops read documentation.
|
||||
* offset: See the linux fops read documentation.
|
||||
*
|
||||
* Wrapper around the drm_read function that makes sure the device is
|
||||
* processing the fifo if drm_read decides to wait.
|
||||
*/
|
||||
ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
|
||||
size_t count, loff_t *offset)
|
||||
{
|
||||
struct drm_file *file_priv = filp->private_data;
|
||||
struct vmw_private *dev_priv =
|
||||
vmw_priv(file_priv->minor->dev);
|
||||
|
||||
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
|
||||
return drm_read(filp, buffer, count, offset);
|
||||
}
|
324
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
Normal file
324
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
Normal file
|
@ -0,0 +1,324 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include "vmwgfx_drv.h"
|
||||
|
||||
#define VMW_FENCE_WRAP (1 << 24)
|
||||
|
||||
/*
 * vmw_irq_handler - Top-half interrupt handler.
 *
 * Reads and acknowledges the device's irq status port, then wakes the
 * fence and/or fifo wait queues depending on which masked bits fired.
 * Returns IRQ_NONE when none of the bits we enabled were set (the line
 * may be shared).
 */
irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;

	/* Snapshot status and mask together under the irq lock. */
	spin_lock(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	masked_status = status & dev_priv->irq_mask;
	spin_unlock(&dev_priv->irq_lock);

	/* Ack everything that was pending, even unmasked bits. */
	if (likely(status))
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

	if (!masked_status)
		return IRQ_NONE;

	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			     SVGA_IRQFLAG_FENCE_GOAL)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
	}

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);


	return IRQ_HANDLED;
}
|
||||
|
||||
/*
 * vmw_fifo_idle - Ask the device whether it is idle.
 *
 * Reads SVGA_REG_BUSY under the hw mutex. The @seqno argument is unused
 * here; it exists so the function can serve as a wait condition with the
 * same signature as vmw_seqno_passed().
 */
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
	uint32_t device_busy;

	mutex_lock(&dev_priv->hw_mutex);
	device_busy = vmw_read(dev_priv, SVGA_REG_BUSY);
	mutex_unlock(&dev_priv->hw_mutex);

	return device_busy == 0;
}
|
||||
|
||||
/*
 * vmw_update_seqno - Refresh the cached last-read fence seqno.
 *
 * Reads the current fence seqno from fifo MMIO; if it advanced, updates
 * the cache, retires markers up to it and lets the fence manager signal
 * any fences that have now passed.
 */
void vmw_update_seqno(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo_state)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_marker_pull(&fifo_state->marker_queue, seqno);
		vmw_fences_update(dev_priv->fman);
	}
}
|
||||
|
||||
/*
 * vmw_seqno_passed - Has the device executed past fence @seqno?
 *
 * The unsigned subtraction against VMW_FENCE_WRAP makes the comparison
 * robust against 32-bit seqno wraparound. Checks the cached value first,
 * refreshes it from the device, and finally falls back to idle / stale
 * detection for devices without fence support.
 */
bool vmw_seqno_passed(struct vmw_private *dev_priv,
			 uint32_t seqno)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	vmw_update_seqno(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	/* No fence support: an idle device has passed everything. */
	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, seqno))
		return true;

	/**
	 * Then check if the seqno is higher than what we've actually
	 * emitted. Then the fence is stale and signaled.
	 */

	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}
|
||||
|
||||
/*
 * vmw_fallback_wait - Poll-based wait used when fence irqs are absent.
 *
 * @lazy:          sleep one tick between polls instead of busy-spinning.
 * @fifo_idle:     wait for device idle instead of for @seqno; also blocks
 *                 command submission for the duration via the fifo rwsem.
 * @seqno:         fence sequence number to wait for (when !@fifo_idle).
 * @interruptible: allow the wait to be broken by signals.
 * @timeout:       maximum wait in jiffies; expiry logs a device lockup.
 *
 * Returns 0 on success or timeout, -ERESTARTSYS if interrupted.
 */
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle)
		down_read(&fifo_state->rwsem);
	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			/* Yield occasionally so a busy poll doesn't hog the CPU. */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		/* Publish the seqno snapshot taken above to the fifo. */
		__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}
|
||||
|
||||
/*
 * vmw_seqno_waiter_add - Register a waiter for fence seqno interrupts.
 *
 * The first waiter clears any stale ANY_FENCE status and enables the
 * ANY_FENCE bit in the device irq mask; further waiters only bump the
 * count. Counter and mask updates are serialized by hw_mutex.
 */
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (dev_priv->fence_queue_waiters++ == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		/* Ack a possibly pending bit before unmasking it. */
		outl(SVGA_IRQFLAG_ANY_FENCE,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}
|
||||
|
||||
/*
 * vmw_seqno_waiter_remove - Drop a fence seqno waiter.
 *
 * The last waiter masks the ANY_FENCE interrupt again. Must balance a
 * prior vmw_seqno_waiter_add() call.
 */
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (--dev_priv->fence_queue_waiters == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}
|
||||
|
||||
|
||||
/*
 * vmw_goal_waiter_add - Register a waiter for fence-goal interrupts.
 *
 * Mirrors vmw_seqno_waiter_add() for the FENCE_GOAL irq bit: the first
 * waiter acks stale status and unmasks the interrupt.
 */
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (dev_priv->goal_queue_waiters++ == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		/* Ack a possibly pending bit before unmasking it. */
		outl(SVGA_IRQFLAG_FENCE_GOAL,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}
|
||||
|
||||
/*
 * vmw_goal_waiter_remove - Drop a fence-goal waiter.
 *
 * The last waiter masks the FENCE_GOAL interrupt again. Must balance a
 * prior vmw_goal_waiter_add() call.
 */
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->hw_mutex);
	if (--dev_priv->goal_queue_waiters == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);
}
|
||||
|
||||
/*
 * vmw_wait_seqno - Wait until the device has passed fence @seqno.
 *
 * Fast-paths the already-signaled cases, pings the device to make it
 * process the fifo, then either polls via vmw_fallback_wait() (no fence
 * or irq-mask support) or sleeps on the fence queue driven by the irq
 * handler. Returns 0 on success, -EBUSY on timeout, or -ERESTARTSYS.
 */
int vmw_wait_seqno(struct vmw_private *dev_priv,
		      bool lazy, uint32_t seqno,
		      bool interruptible, unsigned long timeout)
{
	long ret;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_seqno_passed(dev_priv, seqno)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	/* No fence support at all: poll for device idle. */
	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
					 interruptible, timeout);

	/* Fences but no maskable irq: poll for the seqno instead. */
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
					 interruptible, timeout);

	vmw_seqno_waiter_add(dev_priv);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fence_queue,
		     vmw_seqno_passed(dev_priv, seqno),
		     timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fence_queue,
		     vmw_seqno_passed(dev_priv, seqno),
		     timeout);

	vmw_seqno_waiter_remove(dev_priv);

	/* Map wait_event semantics (0 = timeout, >0 = ok) to errno. */
	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	return ret;
}
|
||||
|
||||
/*
 * vmw_irq_preinstall - DRM irq pre-install hook.
 *
 * Initializes the irq lock and acks any interrupt status that was left
 * pending before the handler is installed. No-op on devices without
 * irq-mask support.
 */
void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	spin_lock_init(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
|
||||
|
||||
/*
 * vmw_irq_postinstall - DRM irq post-install hook.
 *
 * Nothing to do: individual irq bits are unmasked on demand by the
 * waiter-add helpers. Present because DRM requires the callback.
 */
int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}
|
||||
|
||||
/*
 * vmw_irq_uninstall - DRM irq uninstall hook.
 *
 * Masks all device interrupts and acks anything still pending. No-op on
 * devices without irq-mask support.
 */
void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	mutex_lock(&dev_priv->hw_mutex);
	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
	mutex_unlock(&dev_priv->hw_mutex);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
|
2060
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
Normal file
2060
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
Normal file
File diff suppressed because it is too large
Load diff
166
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
Normal file
166
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
Normal file
|
@ -0,0 +1,166 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#ifndef VMWGFX_KMS_H_
|
||||
#define VMWGFX_KMS_H_
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include "vmwgfx_drv.h"
|
||||
|
||||
#define VMWGFX_NUM_DISPLAY_UNITS 8
|
||||
|
||||
|
||||
#define vmw_framebuffer_to_vfb(x) \
|
||||
container_of(x, struct vmw_framebuffer, base)
|
||||
|
||||
/**
 * Base class for framebuffers
 *
 * @pin is called whenever a crtc uses this framebuffer
 * @unpin is the inverse of @pin; presumably called when no crtc scans
 * the framebuffer out any longer (NOTE(review): confirm against callers)
 */
struct vmw_framebuffer {
	struct drm_framebuffer base;		/* embedded core DRM framebuffer */
	int (*pin)(struct vmw_framebuffer *fb);
	int (*unpin)(struct vmw_framebuffer *fb);
	bool dmabuf;				/* true if dmabuf-backed rather than surface-backed */
	struct ttm_base_object *user_obj;
	uint32_t user_handle;
};
|
||||
|
||||
|
||||
#define vmw_crtc_to_du(x) \
|
||||
container_of(x, struct vmw_display_unit, crtc)
|
||||
|
||||
/*
 * Basic cursor manipulation
 */
/* Upload a new cursor image; @image presumably is 32bpp — confirm at call sites. */
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY);
/* Like vmw_cursor_update_image(), sourcing the image from a dma buffer. */
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
			     struct vmw_dma_buffer *dmabuf,
			     u32 width, u32 height,
			     u32 hotspotX, u32 hotspotY);
/* Move the cursor and toggle its visibility. */
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y);
|
||||
|
||||
|
||||
/**
|
||||
* Base class display unit.
|
||||
*
|
||||
* Since the SVGA hw doesn't have a concept of a crtc, encoder or connector
|
||||
* so the display unit is all of them at the same time. This is true for both
|
||||
* legacy multimon and screen objects.
|
||||
*/
|
||||
struct vmw_display_unit {
|
||||
struct drm_crtc crtc;
|
||||
struct drm_encoder encoder;
|
||||
struct drm_connector connector;
|
||||
|
||||
struct vmw_surface *cursor_surface;
|
||||
struct vmw_dma_buffer *cursor_dmabuf;
|
||||
size_t cursor_age;
|
||||
|
||||
int cursor_x;
|
||||
int cursor_y;
|
||||
|
||||
int hotspot_x;
|
||||
int hotspot_y;
|
||||
|
||||
unsigned unit;
|
||||
|
||||
/*
|
||||
* Prefered mode tracking.
|
||||
*/
|
||||
unsigned pref_width;
|
||||
unsigned pref_height;
|
||||
bool pref_active;
|
||||
struct drm_display_mode *pref_mode;
|
||||
|
||||
/*
|
||||
* Gui positioning
|
||||
*/
|
||||
int gui_x;
|
||||
int gui_y;
|
||||
bool is_implicit;
|
||||
};
|
||||
|
||||
#define vmw_crtc_to_du(x) \
|
||||
container_of(x, struct vmw_display_unit, crtc)
|
||||
#define vmw_connector_to_du(x) \
|
||||
container_of(x, struct vmw_display_unit, connector)
|
||||
|
||||
|
||||
/*
|
||||
* Shared display unit functions - vmwgfx_kms.c
|
||||
*/
|
||||
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
|
||||
int vmw_du_page_flip(struct drm_crtc *crtc,
|
||||
struct drm_framebuffer *fb,
|
||||
struct drm_pending_vblank_event *event);
|
||||
void vmw_du_crtc_save(struct drm_crtc *crtc);
|
||||
void vmw_du_crtc_restore(struct drm_crtc *crtc);
|
||||
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
|
||||
u16 *r, u16 *g, u16 *b,
|
||||
uint32_t start, uint32_t size);
|
||||
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
|
||||
uint32_t handle, uint32_t width, uint32_t height);
|
||||
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
|
||||
void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
|
||||
void vmw_du_connector_save(struct drm_connector *connector);
|
||||
void vmw_du_connector_restore(struct drm_connector *connector);
|
||||
enum drm_connector_status
|
||||
vmw_du_connector_detect(struct drm_connector *connector, bool force);
|
||||
int vmw_du_connector_fill_modes(struct drm_connector *connector,
|
||||
uint32_t max_width, uint32_t max_height);
|
||||
int vmw_du_connector_set_property(struct drm_connector *connector,
|
||||
struct drm_property *property,
|
||||
uint64_t val);
|
||||
|
||||
|
||||
/*
|
||||
* Legacy display unit functions - vmwgfx_ldu.c
|
||||
*/
|
||||
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
|
||||
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
|
||||
|
||||
/*
|
||||
* Screen Objects display functions - vmwgfx_scrn.c
|
||||
*/
|
||||
int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
|
||||
int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
|
||||
int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
|
||||
struct drm_vmw_rect *rects);
|
||||
bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
|
||||
struct drm_crtc *crtc);
|
||||
void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
|
||||
struct drm_crtc *crtc);
|
||||
|
||||
|
||||
#endif
|
444
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
Normal file
444
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
Normal file
|
@ -0,0 +1,444 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include "vmwgfx_kms.h"
|
||||
|
||||
|
||||
#define vmw_crtc_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.crtc)
#define vmw_encoder_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.encoder)
#define vmw_connector_to_ldu(x) \
	container_of(x, struct vmw_legacy_display_unit, base.connector)

/* Per-device state for the legacy (register-based) display path. */
struct vmw_legacy_display {
	struct list_head active;	/* active units, sorted by unit index */

	unsigned num_active;
	unsigned last_num_active;

	/* The single framebuffer all active units scan out of. */
	struct vmw_framebuffer *fb;
};

/**
 * Display unit using the legacy register interface.
 */
struct vmw_legacy_display_unit {
	struct vmw_display_unit base;

	/* Link into vmw_legacy_display::active; empty when inactive. */
	struct list_head active;
};
|
||||
|
||||
static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
|
||||
{
|
||||
list_del_init(&ldu->active);
|
||||
vmw_display_unit_cleanup(&ldu->base);
|
||||
kfree(ldu);
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Legacy Display Unit CRTC functions
|
||||
*/
|
||||
|
||||
/* drm_crtc_funcs::destroy hook — resolve the containing ldu and free it. */
static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
}
|
||||
|
||||
/**
 * vmw_ldu_commit_list - Push the current set of active legacy display units
 * to the device.
 *
 * Programs the SVGA display registers from the sorted active list, then
 * re-uploads the cursor image from the first unit that has one.
 *
 * Returns 0, or the result of vmw_kms_write_svga() on the
 * no-display-topology path.
 */
static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
	struct vmw_legacy_display *lds = dev_priv->ldu_priv;
	struct vmw_legacy_display_unit *entry;
	struct vmw_display_unit *du = NULL;
	struct drm_framebuffer *fb = NULL;
	struct drm_crtc *crtc = NULL;
	int i = 0, ret;

	/* If there is no display topology the host just assumes
	 * that the guest will set the same layout as the host.
	 */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)) {
		int w = 0, h = 0;
		/* Bounding box of all active crtcs becomes the single mode. */
		list_for_each_entry(entry, &lds->active, active) {
			crtc = &entry->base.crtc;
			w = max(w, crtc->x + crtc->mode.hdisplay);
			h = max(h, crtc->y + crtc->mode.vdisplay);
			i++;
		}

		if (crtc == NULL)
			return 0;
		/* entry is the last list element here (loop ran to the end). */
		fb = entry->base.crtc.fb;

		return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
					  fb->bits_per_pixel, fb->depth);
	}

	if (!list_empty(&lds->active)) {
		entry = list_entry(lds->active.next, typeof(*entry), active);
		fb = entry->base.crtc.fb;

		vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
				   fb->bits_per_pixel, fb->depth);
	}

	/* Make sure we always show something. */
	vmw_write(dev_priv, SVGA_REG_NUM_GUEST_DISPLAYS,
		  lds->num_active ? lds->num_active : 1);

	i = 0;
	list_for_each_entry(entry, &lds->active, active) {
		crtc = &entry->base.crtc;

		/* Select display i, program its rectangle, then deselect.
		 * Register write order matters: DISPLAY_ID first,
		 * SVGA_ID_INVALID last to latch the update. */
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_IS_PRIMARY, !i);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_X, crtc->x);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay);
		vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);

		i++;
	}

	BUG_ON(i != lds->num_active);

	lds->last_num_active = lds->num_active;


	/* Find the first du with a cursor. */
	list_for_each_entry(entry, &lds->active, active) {
		du = &entry->base;

		if (!du->cursor_dmabuf)
			continue;

		ret = vmw_cursor_update_dmabuf(dev_priv,
					       du->cursor_dmabuf,
					       64, 64,
					       du->hotspot_x,
					       du->hotspot_y);
		if (ret == 0)
			break;

		DRM_ERROR("Could not update cursor image\n");
	}

	return 0;
}
|
||||
|
||||
static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
|
||||
struct vmw_legacy_display_unit *ldu)
|
||||
{
|
||||
struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
|
||||
if (list_empty(&ldu->active))
|
||||
return 0;
|
||||
|
||||
/* Must init otherwise list_empty(&ldu->active) will not work. */
|
||||
list_del_init(&ldu->active);
|
||||
if (--(ld->num_active) == 0) {
|
||||
BUG_ON(!ld->fb);
|
||||
if (ld->fb->unpin)
|
||||
ld->fb->unpin(ld->fb);
|
||||
ld->fb = NULL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * vmw_ldu_add_active - Activate @ldu on framebuffer @vfb.
 *
 * Switches the shared framebuffer (unpinning the old one, pinning the new)
 * and inserts @ldu into the active list, sorted by unit index. Calling with
 * an already-active unit is a no-op apart from the framebuffer switch.
 *
 * Always returns 0.
 */
static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
			      struct vmw_legacy_display_unit *ldu,
			      struct vmw_framebuffer *vfb)
{
	struct vmw_legacy_display *ld = vmw_priv->ldu_priv;
	struct vmw_legacy_display_unit *entry;
	struct list_head *at;

	/* A framebuffer with no active units would be a bookkeeping bug. */
	BUG_ON(!ld->num_active && ld->fb);
	if (vfb != ld->fb) {
		if (ld->fb && ld->fb->unpin)
			ld->fb->unpin(ld->fb);
		if (vfb->pin)
			vfb->pin(vfb);
		ld->fb = vfb;
	}

	if (!list_empty(&ldu->active))
		return 0;

	/* Sorted insert: find the last entry with a smaller unit index. */
	at = &ld->active;
	list_for_each_entry(entry, &ld->active, active) {
		if (entry->base.unit > ldu->base.unit)
			break;

		at = &entry->active;
	}

	list_add(&ldu->active, at);

	ld->num_active++;

	return 0;
}
|
||||
|
||||
static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
|
||||
{
|
||||
struct vmw_private *dev_priv;
|
||||
struct vmw_legacy_display_unit *ldu;
|
||||
struct drm_connector *connector;
|
||||
struct drm_display_mode *mode;
|
||||
struct drm_encoder *encoder;
|
||||
struct vmw_framebuffer *vfb;
|
||||
struct drm_framebuffer *fb;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
if (!set)
|
||||
return -EINVAL;
|
||||
|
||||
if (!set->crtc)
|
||||
return -EINVAL;
|
||||
|
||||
/* get the ldu */
|
||||
crtc = set->crtc;
|
||||
ldu = vmw_crtc_to_ldu(crtc);
|
||||
vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
|
||||
dev_priv = vmw_priv(crtc->dev);
|
||||
|
||||
if (set->num_connectors > 1) {
|
||||
DRM_ERROR("to many connectors\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (set->num_connectors == 1 &&
|
||||
set->connectors[0] != &ldu->base.connector) {
|
||||
DRM_ERROR("connector doesn't match %p %p\n",
|
||||
set->connectors[0], &ldu->base.connector);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* ldu only supports one fb active at the time */
|
||||
if (dev_priv->ldu_priv->fb && vfb &&
|
||||
!(dev_priv->ldu_priv->num_active == 1 &&
|
||||
!list_empty(&ldu->active)) &&
|
||||
dev_priv->ldu_priv->fb != vfb) {
|
||||
DRM_ERROR("Multiple framebuffers not supported\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* since they always map one to one these are safe */
|
||||
connector = &ldu->base.connector;
|
||||
encoder = &ldu->base.encoder;
|
||||
|
||||
/* should we turn the crtc off? */
|
||||
if (set->num_connectors == 0 || !set->mode || !set->fb) {
|
||||
|
||||
connector->encoder = NULL;
|
||||
encoder->crtc = NULL;
|
||||
crtc->fb = NULL;
|
||||
|
||||
vmw_ldu_del_active(dev_priv, ldu);
|
||||
|
||||
return vmw_ldu_commit_list(dev_priv);
|
||||
}
|
||||
|
||||
|
||||
/* we now know we want to set a mode */
|
||||
mode = set->mode;
|
||||
fb = set->fb;
|
||||
|
||||
if (set->x + mode->hdisplay > fb->width ||
|
||||
set->y + mode->vdisplay > fb->height) {
|
||||
DRM_ERROR("set outside of framebuffer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vmw_fb_off(dev_priv);
|
||||
|
||||
crtc->fb = fb;
|
||||
encoder->crtc = crtc;
|
||||
connector->encoder = encoder;
|
||||
crtc->x = set->x;
|
||||
crtc->y = set->y;
|
||||
crtc->mode = *mode;
|
||||
|
||||
vmw_ldu_add_active(dev_priv, ldu, vfb);
|
||||
|
||||
return vmw_ldu_commit_list(dev_priv);
|
||||
}
|
||||
|
||||
/* CRTC vtable for legacy display units; cursor/gamma/save-restore hooks
 * are shared with the other display paths (vmw_du_*). */
static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
	.save = vmw_du_crtc_save,
	.restore = vmw_du_crtc_restore,
	.cursor_set = vmw_du_crtc_cursor_set,
	.cursor_move = vmw_du_crtc_cursor_move,
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_ldu_crtc_destroy,
	.set_config = vmw_ldu_crtc_set_config,
};
|
||||
|
||||
|
||||
/*
|
||||
* Legacy Display Unit encoder functions
|
||||
*/
|
||||
|
||||
/* Encoder destroy hook — the encoder is embedded in the ldu, so destroying
 * either object destroys the whole unit. */
static void vmw_ldu_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_ldu_destroy(vmw_encoder_to_ldu(encoder));
}

static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
	.destroy = vmw_ldu_encoder_destroy,
};

/*
 * Legacy Display Unit connector functions
 */

/* Connector destroy hook — same embedding argument as the encoder. */
static void vmw_ldu_connector_destroy(struct drm_connector *connector)
{
	vmw_ldu_destroy(vmw_connector_to_ldu(connector));
}

static struct drm_connector_funcs vmw_legacy_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.save = vmw_du_connector_save,
	.restore = vmw_du_connector_restore,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_ldu_connector_destroy,
};
|
||||
|
||||
/**
 * vmw_ldu_init - Allocate and register one legacy display unit.
 * @dev_priv: the device.
 * @unit: index of the unit; unit 0 is marked as the preferred/active one.
 *
 * Initializes the embedded connector, encoder and crtc and attaches the
 * dirty-info property. Returns 0 on success, -ENOMEM on allocation failure.
 */
static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_legacy_display_unit *ldu;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	ldu = kzalloc(sizeof(*ldu), GFP_KERNEL);
	if (!ldu)
		return -ENOMEM;

	ldu->base.unit = unit;
	crtc = &ldu->base.crtc;
	encoder = &ldu->base.encoder;
	connector = &ldu->base.connector;

	INIT_LIST_HEAD(&ldu->active);

	/* Only the first unit is active/preferred by default. */
	ldu->base.pref_active = (unit == 0);
	ldu->base.pref_width = dev_priv->initial_width;
	ldu->base.pref_height = dev_priv->initial_height;
	ldu->base.pref_mode = NULL;
	ldu->base.is_implicit = true;

	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	connector->status = vmw_du_connector_detect(connector, true);

	drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
			 DRM_MODE_ENCODER_VIRTUAL);
	drm_mode_connector_attach_encoder(connector, encoder);
	/* Each unit's crtc/encoder pair maps one-to-one. */
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	drm_crtc_init(dev, crtc, &vmw_legacy_crtc_funcs);

	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_object_attach_property(&connector->base,
				   dev->mode_config.dirty_info_property,
				   1);

	return 0;
}
|
||||
|
||||
/**
 * vmw_kms_init_legacy_display_system - Set up the legacy display path.
 *
 * Allocates the per-device legacy display state, initializes vblank
 * support and the dirty-info property, and creates the display units
 * (all of them on multimon-capable hardware, otherwise just one).
 *
 * Returns 0 on success, -EINVAL if already initialized, -ENOMEM or the
 * vblank/property init error otherwise.
 */
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	if (dev_priv->ldu_priv) {
		DRM_INFO("ldu system already on\n");
		return -EINVAL;
	}

	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
	if (!dev_priv->ldu_priv)
		return -ENOMEM;

	/* kmalloc, not kzalloc: all fields are initialized explicitly below. */
	INIT_LIST_HEAD(&dev_priv->ldu_priv->active);
	dev_priv->ldu_priv->num_active = 0;
	dev_priv->ldu_priv->last_num_active = 0;
	dev_priv->ldu_priv->fb = NULL;

	/* for old hardware without multimon only enable one display */
	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
		ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	else
		ret = drm_vblank_init(dev, 1);
	if (ret != 0)
		goto err_free;

	ret = drm_mode_create_dirty_info_property(dev);
	if (ret != 0)
		goto err_vblank_cleanup;

	/* NOTE(review): vmw_ldu_init's return value (-ENOMEM possible) is
	 * ignored here; a failed unit is silently skipped. */
	if (dev_priv->capabilities & SVGA_CAP_MULTIMON)
		for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
			vmw_ldu_init(dev_priv, i);
	else
		vmw_ldu_init(dev_priv, 0);

	return 0;

err_vblank_cleanup:
	drm_vblank_cleanup(dev);
err_free:
	kfree(dev_priv->ldu_priv);
	dev_priv->ldu_priv = NULL;
	return ret;
}
|
||||
|
||||
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
|
||||
if (!dev_priv->ldu_priv)
|
||||
return -ENOSYS;
|
||||
|
||||
drm_vblank_cleanup(dev);
|
||||
|
||||
BUG_ON(!list_empty(&dev_priv->ldu_priv->active));
|
||||
|
||||
kfree(dev_priv->ldu_priv);
|
||||
|
||||
return 0;
|
||||
}
|
171
drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
Normal file
171
drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
Normal file
|
@ -0,0 +1,171 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
|
||||
/* One submitted-fence marker: a fifo seqno plus the time it was pushed. */
struct vmw_marker {
	struct list_head head;		/* link into vmw_marker_queue::head */
	uint32_t seqno;
	struct timespec submitted;	/* raw-monotonic push timestamp */
};
|
||||
|
||||
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
|
||||
{
|
||||
INIT_LIST_HEAD(&queue->head);
|
||||
queue->lag = ns_to_timespec(0);
|
||||
getrawmonotonic(&queue->lag_time);
|
||||
spin_lock_init(&queue->lock);
|
||||
}
|
||||
|
||||
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
|
||||
{
|
||||
struct vmw_marker *marker, *next;
|
||||
|
||||
spin_lock(&queue->lock);
|
||||
list_for_each_entry_safe(marker, next, &queue->head, head) {
|
||||
kfree(marker);
|
||||
}
|
||||
spin_unlock(&queue->lock);
|
||||
}
|
||||
|
||||
int vmw_marker_push(struct vmw_marker_queue *queue,
|
||||
uint32_t seqno)
|
||||
{
|
||||
struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);
|
||||
|
||||
if (unlikely(!marker))
|
||||
return -ENOMEM;
|
||||
|
||||
marker->seqno = seqno;
|
||||
getrawmonotonic(&marker->submitted);
|
||||
spin_lock(&queue->lock);
|
||||
list_add_tail(&marker->head, &queue->head);
|
||||
spin_unlock(&queue->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * vmw_marker_pull - Retire markers whose seqno has been signaled and
 * update the queue's lag estimate.
 *
 * Returns 0 if the lag was updated, -EBUSY if no marker was retired
 * (nothing signaled yet).
 */
int vmw_marker_pull(struct vmw_marker_queue *queue,
		    uint32_t signaled_seqno)
{
	struct vmw_marker *marker, *next;
	struct timespec now;
	bool updated = false;

	spin_lock(&queue->lock);
	getrawmonotonic(&now);

	/* Empty queue: the device is fully caught up, lag is zero. */
	if (list_empty(&queue->head)) {
		queue->lag = ns_to_timespec(0);
		queue->lag_time = now;
		updated = true;
		goto out_unlock;
	}

	list_for_each_entry_safe(marker, next, &queue->head, head) {
		/* Wraparound-safe "not yet signaled" test: a huge unsigned
		 * difference means marker->seqno is ahead of signaled_seqno. */
		if (signaled_seqno - marker->seqno > (1 << 30))
			continue;

		queue->lag = timespec_sub(now, marker->submitted);
		queue->lag_time = now;
		updated = true;
		list_del(&marker->head);
		kfree(marker);
	}

out_unlock:
	spin_unlock(&queue->lock);

	return (updated) ? 0 : -EBUSY;
}
|
||||
|
||||
/* Add two timespecs, normalizing the nanosecond field into [0, 1e9).
 * Assumes both inputs are already normalized, so at most one carry. */
static struct timespec vmw_timespec_add(struct timespec t1,
					struct timespec t2)
{
	struct timespec sum;

	sum.tv_sec = t1.tv_sec + t2.tv_sec;
	sum.tv_nsec = t1.tv_nsec + t2.tv_nsec;

	if (sum.tv_nsec >= 1000000000L) {
		sum.tv_sec++;
		sum.tv_nsec -= 1000000000L;
	}

	return sum;
}
|
||||
|
||||
static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
|
||||
{
|
||||
struct timespec now;
|
||||
|
||||
spin_lock(&queue->lock);
|
||||
getrawmonotonic(&now);
|
||||
queue->lag = vmw_timespec_add(queue->lag,
|
||||
timespec_sub(now, queue->lag_time));
|
||||
queue->lag_time = now;
|
||||
spin_unlock(&queue->lock);
|
||||
return queue->lag;
|
||||
}
|
||||
|
||||
|
||||
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
|
||||
uint32_t us)
|
||||
{
|
||||
struct timespec lag, cond;
|
||||
|
||||
cond = ns_to_timespec((s64) us * 1000);
|
||||
lag = vmw_fifo_lag(queue);
|
||||
return (timespec_compare(&lag, &cond) < 1);
|
||||
}
|
||||
|
||||
/**
 * vmw_wait_lag - Throttle until the marker queue's lag drops below @us
 * microseconds.
 *
 * Repeatedly waits for the oldest outstanding seqno (or the most recent
 * one when the queue is empty) and retires markers until the lag target
 * is met. Returns 0 on success or the error from vmw_wait_seqno().
 */
int vmw_wait_lag(struct vmw_private *dev_priv,
		 struct vmw_marker_queue *queue, uint32_t us)
{
	struct vmw_marker *marker;
	uint32_t seqno;
	int ret;

	while (!vmw_lag_lt(queue, us)) {
		/* Pick the seqno to wait on under the queue lock. */
		spin_lock(&queue->lock);
		if (list_empty(&queue->head))
			seqno = atomic_read(&dev_priv->marker_seq);
		else {
			marker = list_first_entry(&queue->head,
						  struct vmw_marker, head);
			seqno = marker->seqno;
		}
		spin_unlock(&queue->lock);

		/* Lock dropped while sleeping; 3s timeout per iteration. */
		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
				     3*HZ);

		if (unlikely(ret != 0))
			return ret;

		/* Best effort retire; -EBUSY here just means loop again. */
		(void) vmw_marker_pull(queue, seqno);
	}
	return 0;
}
|
619
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
Normal file
619
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
Normal file
|
@ -0,0 +1,619 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include "vmwgfx_drv.h"
|
||||
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
|
||||
#include "svga_overlay.h"
|
||||
#include "svga_escape.h"
|
||||
|
||||
#define VMW_MAX_NUM_STREAMS 1
/* FIFO capabilities required for the overlay path. */
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)

/* One video overlay stream (an Xv "port"). */
struct vmw_stream {
	struct vmw_dma_buffer *buf;	/* backing buffer; NULL when stopped */
	bool claimed;
	bool paused;
	/* Last control arguments, kept so a paused stream can be resumed. */
	struct drm_vmw_control_stream_arg saved;
};

/**
 * Overlay control
 */
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	struct mutex mutex;
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

/* Resolve the per-device overlay state; NULL when overlays are unsupported. */
static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	return dev_priv ? dev_priv->overlay_priv : NULL;
}

/* Fifo ESCAPE command header. */
struct vmw_escape_header {
	uint32_t cmd;
	SVGAFifoCmdEscape body;
};

/* ESCAPE-wrapped video flush command. */
struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};
|
||||
|
||||
/* Fill an ESCAPE header for a VMware-namespace payload of @size bytes. */
static inline void fill_escape(struct vmw_escape_header *header,
			       uint32_t size)
{
	header->cmd = SVGA_CMD_ESCAPE;
	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	header->body.size = size;
}

/* Fill a video-flush escape command for stream @stream_id. */
static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}
|
||||
|
||||
/**
 * Send put command to hw.
 *
 * Builds a single fifo submission containing a SET_REGS escape (one
 * register/value pair per overlay register) followed by a video flush.
 *
 * Returns
 * -ENOMEM if fifo space could not be reserved (hardware hung).
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct vmw_escape_video_flush *flush;
	size_t fifo_size;
	bool have_so = dev_priv->sou_priv ? true : false;
	int i, num_items;
	SVGAGuestPtr ptr;

	/* Fifo packet layout: [escape header + SET_REGS header][items][flush] */
	struct {
		struct vmw_escape_header escape;
		struct {
			uint32_t cmdType;
			uint32_t streamId;
		} header;
	} *cmds;
	struct {
		uint32_t registerId;
		uint32_t value;
	} *items;

	/* defines are a index needs + 1 */
	if (have_so)
		num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
	else
		num_items = SVGA_VIDEO_PITCH_3 + 1;

	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;

	cmds = vmw_fifo_reserve(dev_priv, fifo_size);
	/* hardware has hung, can't do anything here */
	if (!cmds)
		return -ENOMEM;

	items = (typeof(items))&cmds[1];
	flush = (struct vmw_escape_video_flush *)&items[num_items];

	/* the size is header + number of items */
	fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));

	cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->header.streamId = arg->stream_id;

	/* the IDs are neatly numbered */
	for (i = 0; i < num_items; i++)
		items[i].registerId = i;

	vmw_bo_get_guest_ptr(&buf->base, &ptr);
	ptr.offset += arg->offset;

	items[SVGA_VIDEO_ENABLED].value     = true;
	items[SVGA_VIDEO_FLAGS].value       = arg->flags;
	items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
	items[SVGA_VIDEO_FORMAT].value      = arg->format;
	items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
	items[SVGA_VIDEO_SIZE].value        = arg->size;
	items[SVGA_VIDEO_WIDTH].value       = arg->width;
	items[SVGA_VIDEO_HEIGHT].value      = arg->height;
	items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
	items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
	items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
	items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
	items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
	items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
	items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
	items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
	items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
	items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
	items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];
	if (have_so) {
		/* Screen-object hardware takes the buffer via a GMR id. */
		items[SVGA_VIDEO_DATA_GMRID].value    = ptr.gmrId;
		items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
	}

	fill_flush(flush, arg->stream_id);

	vmw_fifo_commit(dev_priv, fifo_size);

	return 0;
}
|
||||
|
||||
/**
 * Send stop command to hw.
 *
 * Disables stream @stream_id by writing SVGA_VIDEO_ENABLED = false followed
 * by a flush. Retries fifo reservation indefinitely (waiting up to 3s per
 * attempt) unless interrupted.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
				 uint32_t stream_id,
				 bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		SVGAEscapeVideoSetRegs body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	int ret;

	/* Loop until fifo space is available; a stop must not be dropped. */
	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = stream_id;
	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
	cmds->body.items[0].value = false;
	fill_flush(&cmds->flush, stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}
|
||||
|
||||
/**
|
||||
* Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
|
||||
*
|
||||
* With the introduction of screen objects buffers could now be
|
||||
* used with GMRs instead of being locked to vram.
|
||||
*/
|
||||
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *buf,
|
||||
bool pin, bool inter)
|
||||
{
|
||||
if (!pin)
|
||||
return vmw_dmabuf_unpin(dev_priv, buf, inter);
|
||||
|
||||
if (!dev_priv->sou_priv)
|
||||
return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
|
||||
|
||||
return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
|
||||
}
|
||||
|
||||
/**
 * Stop or pause a stream.
 *
 * If the stream is paused the no evict flag is removed from the buffer
 * but left in vram. This allows for instance mode_set to evict it
 * should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 *
 * Returns 0 on success, or the error from vmw_overlay_send_stop() /
 * -ERESTARTSYS from the buffer move.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* no buffer attached the stream is completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag so no -ENOMEM */
		ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
					      interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		/* Full stop: drop the buffer reference entirely. */
		vmw_dmabuf_unreference(&stream->buf);
		stream->paused = false;
	} else {
		stream->paused = true;
	}

	return 0;
}
|
||||
|
||||
/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -EINVAL if no buffer is supplied.
 * -ENOMEM if buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		/* Switching to a new buffer: fully stop the old stream so
		 * its buffer reference can be replaced below. */
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and not paused then just send
		 * the put command, no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * Might return -ENOMEM if it can't fit the buffer in vram.
	 */
	ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
		       != 0);
		return ret;
	}

	/* Take a reference only when the attached buffer changes. */
	if (stream->buf != buf)
		stream->buf = vmw_dmabuf_reference(buf);
	stream->saved = *arg;
	/* stream is no longer stopped/paused */
	stream->paused = false;

	return 0;
}
|
||||
|
||||
/**
|
||||
* Stop all streams.
|
||||
*
|
||||
* Used by the fb code when starting.
|
||||
*
|
||||
* Takes the overlay lock.
|
||||
*/
|
||||
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct vmw_overlay *overlay = dev_priv->overlay_priv;
|
||||
int i, ret;
|
||||
|
||||
if (!overlay)
|
||||
return 0;
|
||||
|
||||
mutex_lock(&overlay->mutex);
|
||||
|
||||
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
|
||||
struct vmw_stream *stream = &overlay->stream[i];
|
||||
if (!stream->buf)
|
||||
continue;
|
||||
|
||||
ret = vmw_overlay_stop(dev_priv, i, false, false);
|
||||
WARN_ON(ret != 0);
|
||||
}
|
||||
|
||||
mutex_unlock(&overlay->mutex);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 *
 * Always returns 0; individual resume failures are only logged.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->paused)
			continue;

		/* Replay the arguments saved when the stream was paused. */
		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
						&stream->saved, false);
		if (ret != 0)
			DRM_INFO("%s: *warning* failed to resume stream %i\n",
				 __func__, i);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}
|
||||
|
||||
/**
 * Pauses all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 *
 * Always returns 0; pause failures trigger a WARN only.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		/* Pausing twice indicates unbalanced pause/resume calls. */
		if (overlay->stream[i].paused)
			DRM_INFO("%s: *warning* stream %i already paused\n",
				 __func__, i);
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}
|
||||
|
||||
|
||||
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
|
||||
{
|
||||
return (dev_priv->overlay_priv != NULL &&
|
||||
((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
|
||||
VMW_OVERLAY_CAP_MASK));
|
||||
}
|
||||
|
||||
/**
 * Ioctl entry point for controlling an overlay stream.
 *
 * Looks up the user stream resource, then either stops the stream
 * (when arg->enabled is false) or updates it to scan out the dma
 * buffer identified by arg->handle. Serialized by the overlay mutex.
 */
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!vmw_overlay_available(dev_priv))
		return -ENOSYS;

	/* Validates that the caller owns stream_id; ref dropped on exit. */
	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	/* update_stream took its own reference if it kept the buffer. */
	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}
|
||||
|
||||
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
|
||||
{
|
||||
if (!vmw_overlay_available(dev_priv))
|
||||
return 0;
|
||||
|
||||
return VMW_MAX_NUM_STREAMS;
|
||||
}
|
||||
|
||||
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct vmw_overlay *overlay = dev_priv->overlay_priv;
|
||||
int i, k;
|
||||
|
||||
if (!vmw_overlay_available(dev_priv))
|
||||
return 0;
|
||||
|
||||
mutex_lock(&overlay->mutex);
|
||||
|
||||
for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
|
||||
if (!overlay->stream[i].claimed)
|
||||
k++;
|
||||
|
||||
mutex_unlock(&overlay->mutex);
|
||||
|
||||
return k;
|
||||
}
|
||||
|
||||
/**
 * Claim the first unclaimed overlay stream, storing its index in @out.
 *
 * Returns 0 on success, -ENOSYS when overlays are not initialized,
 * -ESRCH when every stream is already claimed. Takes the overlay lock.
 */
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int ret = -ESRCH;
	int idx;

	if (overlay == NULL)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (idx = 0; idx < VMW_MAX_NUM_STREAMS; idx++) {
		if (overlay->stream[idx].claimed)
			continue;

		overlay->stream[idx].claimed = true;
		*out = idx;
		ret = 0;
		break;
	}

	mutex_unlock(&overlay->mutex);

	return ret;
}
|
||||
|
||||
/**
 * Release a previously claimed overlay stream, stopping it first.
 *
 * @stream_id index of the stream to release; BUG on out-of-range ids.
 *
 * Returns 0, or -ENOSYS when overlays are not initialized.
 * NOTE(review): the return value of vmw_overlay_stop() is ignored here;
 * with interruptible == false it can only fail via BUG/WARN paths.
 */
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	/* Unreffing an unclaimed stream indicates an imbalance. */
	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);
	return 0;
}
|
||||
|
||||
int vmw_overlay_init(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct vmw_overlay *overlay;
|
||||
int i;
|
||||
|
||||
if (dev_priv->overlay_priv)
|
||||
return -EINVAL;
|
||||
|
||||
overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
|
||||
if (!overlay)
|
||||
return -ENOMEM;
|
||||
|
||||
mutex_init(&overlay->mutex);
|
||||
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
|
||||
overlay->stream[i].buf = NULL;
|
||||
overlay->stream[i].paused = false;
|
||||
overlay->stream[i].claimed = false;
|
||||
}
|
||||
|
||||
dev_priv->overlay_priv = overlay;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vmw_overlay_close(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct vmw_overlay *overlay = dev_priv->overlay_priv;
|
||||
bool forgotten_buffer = false;
|
||||
int i;
|
||||
|
||||
if (!overlay)
|
||||
return -ENOSYS;
|
||||
|
||||
for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
|
||||
if (overlay->stream[i].buf) {
|
||||
forgotten_buffer = true;
|
||||
vmw_overlay_stop(dev_priv, i, false, false);
|
||||
}
|
||||
}
|
||||
|
||||
WARN_ON(forgotten_buffer);
|
||||
|
||||
dev_priv->overlay_priv = NULL;
|
||||
kfree(overlay);
|
||||
|
||||
return 0;
|
||||
}
|
57
drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
Normal file
57
drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
Normal file
|
@ -0,0 +1,57 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
/**
|
||||
* This file contains virtual hardware defines for kernel space.
|
||||
*/
|
||||
|
||||
#ifndef _VMWGFX_REG_H_
#define _VMWGFX_REG_H_

#include <linux/types.h>

/* I/O port offsets relative to the device's base port. */
#define VMWGFX_INDEX_PORT     0x0
#define VMWGFX_VALUE_PORT     0x1
#define VMWGFX_IRQSTATUS_PORT 0x8

/* One entry of a guest memory region descriptor list (little-endian).
 * NOTE(review): ppn is presumably a guest physical page frame number —
 * confirm against the SVGA device documentation. */
struct svga_guest_mem_descriptor {
	__le32 ppn;
	__le32 num_pages;	/* pages in this contiguous region */
};

/* Body of a FIFO fence command; @fence is the fence value written. */
struct svga_fifo_cmd_fence {
	__le32 fence;
};

/* Reason codes used when syncing with the device. */
#define SVGA_SYNC_GENERIC  1
#define SVGA_SYNC_FIFOFULL 2

#include "svga_types.h"

#include "svga3d_reg.h"

#endif
|
1298
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
Normal file
1298
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
Normal file
File diff suppressed because it is too large
Load diff
84
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
Normal file
84
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
Normal file
|
@ -0,0 +1,84 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2012 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#ifndef _VMWGFX_RESOURCE_PRIV_H_
#define _VMWGFX_RESOURCE_PRIV_H_

#include "vmwgfx_drv.h"

/**
 * struct vmw_user_resource_conv - Identify a derived user-exported resource
 * type and provide a function to convert its ttm_base_object pointer to
 * a struct vmw_resource
 */
struct vmw_user_resource_conv {
	enum ttm_object_type object_type;
	struct vmw_resource *(*base_obj_to_res)(struct ttm_base_object *base);
	/* Destructor invoked when the resource is released. */
	void (*res_free) (struct vmw_resource *res);
};

/**
 * struct vmw_res_func - members and functions common for a resource type
 *
 * @res_type: Enum that identifies the lru list to use for eviction.
 * @needs_backup: Whether the resource is guest-backed and needs
 * persistent buffer storage.
 * @type_name: String that identifies the resource type.
 * @backup_placement: TTM placement for backup buffers.
 * @may_evict: Whether the resource may be evicted.
 * @create: Create a hardware resource.
 * @destroy: Destroy a hardware resource.
 * @bind: Bind a hardware resource to persistent buffer storage.
 * @unbind: Unbind a hardware resource from persistent
 * buffer storage.
 */

struct vmw_res_func {
	enum vmw_res_type res_type;
	bool needs_backup;
	const char *type_name;
	struct ttm_placement *backup_placement;
	bool may_evict;

	int (*create) (struct vmw_resource *res);
	int (*destroy) (struct vmw_resource *res);
	int (*bind) (struct vmw_resource *res,
		     struct ttm_validate_buffer *val_buf);
	int (*unbind) (struct vmw_resource *res,
		       bool readback,
		       struct ttm_validate_buffer *val_buf);
};

/* Helpers implemented in vmwgfx_resource.c. */
int vmw_resource_alloc_id(struct vmw_resource *res);
void vmw_resource_release_id(struct vmw_resource *res);
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func);
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *));
#endif
|
571
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
Normal file
571
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
Normal file
|
@ -0,0 +1,571 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include "vmwgfx_kms.h"
|
||||
|
||||
|
||||
#define vmw_crtc_to_sou(x) \
|
||||
container_of(x, struct vmw_screen_object_unit, base.crtc)
|
||||
#define vmw_encoder_to_sou(x) \
|
||||
container_of(x, struct vmw_screen_object_unit, base.encoder)
|
||||
#define vmw_connector_to_sou(x) \
|
||||
container_of(x, struct vmw_screen_object_unit, base.connector)
|
||||
|
||||
struct vmw_screen_object_display {
|
||||
unsigned num_implicit;
|
||||
|
||||
struct vmw_framebuffer *implicit_fb;
|
||||
};
|
||||
|
||||
/**
|
||||
* Display unit using screen objects.
|
||||
*/
|
||||
struct vmw_screen_object_unit {
|
||||
struct vmw_display_unit base;
|
||||
|
||||
unsigned long buffer_size; /**< Size of allocated buffer */
|
||||
struct vmw_dma_buffer *buffer; /**< Backing store buffer */
|
||||
|
||||
bool defined;
|
||||
bool active_implicit;
|
||||
};
|
||||
|
||||
/* Tear down the common display-unit state and free the unit itself. */
static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
{
	vmw_display_unit_cleanup(&sou->base);
	kfree(sou);
}
|
||||
|
||||
|
||||
/*
|
||||
* Screen Object Display Unit CRTC functions
|
||||
*/
|
||||
|
||||
/* CRTC destroy hook: the crtc is embedded in the unit, so free the unit. */
static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
{
	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}
|
||||
|
||||
static void vmw_sou_del_active(struct vmw_private *vmw_priv,
|
||||
struct vmw_screen_object_unit *sou)
|
||||
{
|
||||
struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
|
||||
|
||||
if (sou->active_implicit) {
|
||||
if (--(ld->num_implicit) == 0)
|
||||
ld->implicit_fb = NULL;
|
||||
sou->active_implicit = false;
|
||||
}
|
||||
}
|
||||
|
||||
static void vmw_sou_add_active(struct vmw_private *vmw_priv,
|
||||
struct vmw_screen_object_unit *sou,
|
||||
struct vmw_framebuffer *vfb)
|
||||
{
|
||||
struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
|
||||
|
||||
BUG_ON(!ld->num_implicit && ld->implicit_fb);
|
||||
|
||||
if (!sou->active_implicit && sou->base.is_implicit) {
|
||||
ld->implicit_fb = vfb;
|
||||
sou->active_implicit = true;
|
||||
ld->num_implicit++;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Send the fifo command to create a screen.
 *
 * Defines screen object @sou at position (@x, @y) — or at the unit's
 * fixed gui position for non-implicit units — with the dimensions of
 * @mode, backed by the already-pinned sou->buffer.
 *
 * Returns 0 on success, -ENOMEM if the fifo reservation fails
 * (hardware hung).
 */
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
			       struct vmw_screen_object_unit *sou,
			       uint32_t x, uint32_t y,
			       struct drm_display_mode *mode)
{
	size_t fifo_size;

	struct {
		struct {
			uint32_t cmdType;
		} header;
		SVGAScreenObject obj;
	} *cmd;

	BUG_ON(!sou->buffer);

	fifo_size = sizeof(*cmd);
	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
	/* The hardware has hung, nothing we can do about it here. */
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
	cmd->obj.structSize = sizeof(SVGAScreenObject);
	cmd->obj.id = sou->base.unit;
	/* Unit 0 is flagged as the primary screen. */
	cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
	cmd->obj.size.width = mode->hdisplay;
	cmd->obj.size.height = mode->vdisplay;
	if (sou->base.is_implicit) {
		cmd->obj.root.x = x;
		cmd->obj.root.y = y;
	} else {
		cmd->obj.root.x = sou->base.gui_x;
		cmd->obj.root.y = sou->base.gui_y;
	}

	/* Ok to assume that buffer is pinned in vram */
	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
	/* Depth is forced to 4 bytes per pixel (see backing alloc). */
	cmd->obj.backingStore.pitch = mode->hdisplay * 4;

	vmw_fifo_commit(dev_priv, fifo_size);

	sou->defined = true;

	return 0;
}
|
||||
|
||||
/**
|
||||
* Send the fifo command to destroy a screen.
|
||||
*/
|
||||
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
|
||||
struct vmw_screen_object_unit *sou)
|
||||
{
|
||||
size_t fifo_size;
|
||||
int ret;
|
||||
|
||||
struct {
|
||||
struct {
|
||||
uint32_t cmdType;
|
||||
} header;
|
||||
SVGAFifoCmdDestroyScreen body;
|
||||
} *cmd;
|
||||
|
||||
/* no need to do anything */
|
||||
if (unlikely(!sou->defined))
|
||||
return 0;
|
||||
|
||||
fifo_size = sizeof(*cmd);
|
||||
cmd = vmw_fifo_reserve(dev_priv, fifo_size);
|
||||
/* the hardware has hung, nothing we can do about it here */
|
||||
if (unlikely(cmd == NULL)) {
|
||||
DRM_ERROR("Fifo reserve failed.\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
memset(cmd, 0, fifo_size);
|
||||
cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
|
||||
cmd->body.screenId = sou->base.unit;
|
||||
|
||||
vmw_fifo_commit(dev_priv, fifo_size);
|
||||
|
||||
/* Force sync */
|
||||
ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
|
||||
if (unlikely(ret != 0))
|
||||
DRM_ERROR("Failed to sync with HW");
|
||||
else
|
||||
sou->defined = false;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* Free the backing store.
|
||||
*/
|
||||
static void vmw_sou_backing_free(struct vmw_private *dev_priv,
|
||||
struct vmw_screen_object_unit *sou)
|
||||
{
|
||||
struct ttm_buffer_object *bo;
|
||||
|
||||
if (unlikely(sou->buffer == NULL))
|
||||
return;
|
||||
|
||||
bo = &sou->buffer->base;
|
||||
ttm_bo_unref(&bo);
|
||||
sou->buffer = NULL;
|
||||
sou->buffer_size = 0;
|
||||
}
|
||||
|
||||
/**
 * Allocate the backing store for the buffer.
 *
 * Reuses the existing buffer when it already has the requested @size;
 * otherwise frees the old one and allocates a new non-evictable vram
 * buffer. Overlays are paused across the allocation so their vram can
 * be reclaimed if needed.
 *
 * Returns 0 on success or a negative error from vmw_dmabuf_init.
 */
static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
				 struct vmw_screen_object_unit *sou,
				 unsigned long size)
{
	int ret;

	if (sou->buffer_size == size)
		return 0;

	if (sou->buffer)
		vmw_sou_backing_free(dev_priv, sou);

	sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
	if (unlikely(sou->buffer == NULL))
		return -ENOMEM;

	/* After we have alloced the backing store might not be able to
	 * resume the overlays, this is preferred to failing to alloc.
	 */
	vmw_overlay_pause_all(dev_priv);
	ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
			      &vmw_vram_ne_placement,
			      false, &vmw_dmabuf_bo_free);
	vmw_overlay_resume_all(dev_priv);

	if (unlikely(ret != 0))
		sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
	else
		sou->buffer_size = size;

	return ret;
}
|
||||
|
||||
static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
|
||||
{
|
||||
struct vmw_private *dev_priv;
|
||||
struct vmw_screen_object_unit *sou;
|
||||
struct drm_connector *connector;
|
||||
struct drm_display_mode *mode;
|
||||
struct drm_encoder *encoder;
|
||||
struct vmw_framebuffer *vfb;
|
||||
struct drm_framebuffer *fb;
|
||||
struct drm_crtc *crtc;
|
||||
int ret = 0;
|
||||
|
||||
if (!set)
|
||||
return -EINVAL;
|
||||
|
||||
if (!set->crtc)
|
||||
return -EINVAL;
|
||||
|
||||
/* get the sou */
|
||||
crtc = set->crtc;
|
||||
sou = vmw_crtc_to_sou(crtc);
|
||||
vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
|
||||
dev_priv = vmw_priv(crtc->dev);
|
||||
|
||||
if (set->num_connectors > 1) {
|
||||
DRM_ERROR("to many connectors\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (set->num_connectors == 1 &&
|
||||
set->connectors[0] != &sou->base.connector) {
|
||||
DRM_ERROR("connector doesn't match %p %p\n",
|
||||
set->connectors[0], &sou->base.connector);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* sou only supports one fb active at the time */
|
||||
if (sou->base.is_implicit &&
|
||||
dev_priv->sou_priv->implicit_fb && vfb &&
|
||||
!(dev_priv->sou_priv->num_implicit == 1 &&
|
||||
sou->active_implicit) &&
|
||||
dev_priv->sou_priv->implicit_fb != vfb) {
|
||||
DRM_ERROR("Multiple framebuffers not supported\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* since they always map one to one these are safe */
|
||||
connector = &sou->base.connector;
|
||||
encoder = &sou->base.encoder;
|
||||
|
||||
/* should we turn the crtc off */
|
||||
if (set->num_connectors == 0 || !set->mode || !set->fb) {
|
||||
ret = vmw_sou_fifo_destroy(dev_priv, sou);
|
||||
/* the hardware has hung don't do anything more */
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
connector->encoder = NULL;
|
||||
encoder->crtc = NULL;
|
||||
crtc->fb = NULL;
|
||||
crtc->x = 0;
|
||||
crtc->y = 0;
|
||||
|
||||
vmw_sou_del_active(dev_priv, sou);
|
||||
|
||||
vmw_sou_backing_free(dev_priv, sou);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/* we now know we want to set a mode */
|
||||
mode = set->mode;
|
||||
fb = set->fb;
|
||||
|
||||
if (set->x + mode->hdisplay > fb->width ||
|
||||
set->y + mode->vdisplay > fb->height) {
|
||||
DRM_ERROR("set outside of framebuffer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vmw_fb_off(dev_priv);
|
||||
|
||||
if (mode->hdisplay != crtc->mode.hdisplay ||
|
||||
mode->vdisplay != crtc->mode.vdisplay) {
|
||||
/* no need to check if depth is different, because backing
|
||||
* store depth is forced to 4 by the device.
|
||||
*/
|
||||
|
||||
ret = vmw_sou_fifo_destroy(dev_priv, sou);
|
||||
/* the hardware has hung don't do anything more */
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
vmw_sou_backing_free(dev_priv, sou);
|
||||
}
|
||||
|
||||
if (!sou->buffer) {
|
||||
/* forced to depth 4 by the device */
|
||||
size_t size = mode->hdisplay * mode->vdisplay * 4;
|
||||
ret = vmw_sou_backing_alloc(dev_priv, sou, size);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
|
||||
if (unlikely(ret != 0)) {
|
||||
/*
|
||||
* We are in a bit of a situation here, the hardware has
|
||||
* hung and we may or may not have a buffer hanging of
|
||||
* the screen object, best thing to do is not do anything
|
||||
* if we where defined, if not just turn the crtc of.
|
||||
* Not what userspace wants but it needs to htfu.
|
||||
*/
|
||||
if (sou->defined)
|
||||
return ret;
|
||||
|
||||
connector->encoder = NULL;
|
||||
encoder->crtc = NULL;
|
||||
crtc->fb = NULL;
|
||||
crtc->x = 0;
|
||||
crtc->y = 0;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
vmw_sou_add_active(dev_priv, sou, vfb);
|
||||
|
||||
connector->encoder = encoder;
|
||||
encoder->crtc = crtc;
|
||||
crtc->mode = *mode;
|
||||
crtc->fb = fb;
|
||||
crtc->x = set->x;
|
||||
crtc->y = set->y;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* CRTC callbacks for screen-object display units; generic vmw_du_*
 * helpers are shared with the other display-unit implementations. */
static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
	.save = vmw_du_crtc_save,
	.restore = vmw_du_crtc_restore,
	.cursor_set = vmw_du_crtc_cursor_set,
	.cursor_move = vmw_du_crtc_cursor_move,
	.gamma_set = vmw_du_crtc_gamma_set,
	.destroy = vmw_sou_crtc_destroy,
	.set_config = vmw_sou_crtc_set_config,
	.page_flip = vmw_du_page_flip,
};
|
||||
|
||||
/*
 * Screen Object Display Unit encoder functions
 */

/* Encoder destroy hook: the encoder is embedded in the unit. */
static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
{
	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
}

static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
	.destroy = vmw_sou_encoder_destroy,
};
|
||||
|
||||
/*
 * Screen Object Display Unit connector functions
 */

/* Connector destroy hook: the connector is embedded in the unit. */
static void vmw_sou_connector_destroy(struct drm_connector *connector)
{
	vmw_sou_destroy(vmw_connector_to_sou(connector));
}

/* NOTE(review): named "legacy" although this is the screen-object
 * connector table — consider renaming for consistency. */
static struct drm_connector_funcs vmw_legacy_connector_funcs = {
	.dpms = vmw_du_connector_dpms,
	.save = vmw_du_connector_save,
	.restore = vmw_du_connector_restore,
	.detect = vmw_du_connector_detect,
	.fill_modes = vmw_du_connector_fill_modes,
	.set_property = vmw_du_connector_set_property,
	.destroy = vmw_sou_connector_destroy,
};
|
||||
|
||||
/**
 * Allocate and register one screen-object display unit (crtc, encoder
 * and connector) with the DRM core.
 *
 * @unit zero-based unit index; unit 0 is preferred-active by default.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 * NOTE(review): return values of the drm_*_init calls are not checked —
 * confirm whether failure handling is needed here.
 */
static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
{
	struct vmw_screen_object_unit *sou;
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;

	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
	if (!sou)
		return -ENOMEM;

	sou->base.unit = unit;
	crtc = &sou->base.crtc;
	encoder = &sou->base.encoder;
	connector = &sou->base.connector;

	sou->active_implicit = false;

	sou->base.pref_active = (unit == 0);
	sou->base.pref_width = dev_priv->initial_width;
	sou->base.pref_height = dev_priv->initial_height;
	sou->base.pref_mode = NULL;
	sou->base.is_implicit = true;

	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
			   DRM_MODE_CONNECTOR_VIRTUAL);
	connector->status = vmw_du_connector_detect(connector, true);

	drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
			 DRM_MODE_ENCODER_VIRTUAL);
	drm_mode_connector_attach_encoder(connector, encoder);
	/* Each unit's encoder can only drive its own crtc. */
	encoder->possible_crtcs = (1 << unit);
	encoder->possible_clones = 0;

	drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);

	drm_mode_crtc_set_gamma_size(crtc, 256);

	drm_object_attach_property(&connector->base,
				   dev->mode_config.dirty_info_property,
				   1);

	return 0;
}
|
||||
|
||||
/**
 * Initialize the screen-object display system: allocate the shared
 * sou state, set up vblank and the dirty-info property, then create
 * all display units.
 *
 * Returns 0 on success, -EINVAL if already initialized, -ENOSYS when
 * the device lacks SVGA_CAP_SCREEN_OBJECT_2, or a negative error from
 * the DRM setup calls.
 * NOTE(review): per-unit vmw_sou_init() return values are ignored.
 */
int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int i, ret;

	if (dev_priv->sou_priv) {
		DRM_INFO("sou system already on\n");
		return -EINVAL;
	}

	if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) {
		DRM_INFO("Not using screen objects,"
			 " missing cap SCREEN_OBJECT_2\n");
		return -ENOSYS;
	}

	ret = -ENOMEM;
	dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
	if (unlikely(!dev_priv->sou_priv))
		goto err_no_mem;

	/* kmalloc'd, so every field must be initialized explicitly. */
	dev_priv->sou_priv->num_implicit = 0;
	dev_priv->sou_priv->implicit_fb = NULL;

	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
	if (unlikely(ret != 0))
		goto err_free;

	ret = drm_mode_create_dirty_info_property(dev);
	if (unlikely(ret != 0))
		goto err_vblank_cleanup;

	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
		vmw_sou_init(dev_priv, i);

	DRM_INFO("Screen objects system initialized\n");

	return 0;

err_vblank_cleanup:
	drm_vblank_cleanup(dev);
err_free:
	kfree(dev_priv->sou_priv);
	dev_priv->sou_priv = NULL;
err_no_mem:
	return ret;
}
|
||||
|
||||
int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
|
||||
if (!dev_priv->sou_priv)
|
||||
return -ENOSYS;
|
||||
|
||||
drm_vblank_cleanup(dev);
|
||||
|
||||
kfree(dev_priv->sou_priv);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns if this unit can be page flipped.
|
||||
* Must be called with the mode_config mutex held.
|
||||
*/
|
||||
bool vmw_kms_screen_object_flippable(struct vmw_private *dev_priv,
|
||||
struct drm_crtc *crtc)
|
||||
{
|
||||
struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);
|
||||
|
||||
if (!sou->base.is_implicit)
|
||||
return true;
|
||||
|
||||
if (dev_priv->sou_priv->num_implicit != 1)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
 * Update the implicit fb to the current fb of this crtc.
 * Must be called with the mode_config mutex held.
 */
void vmw_kms_screen_object_update_implicit_fb(struct vmw_private *dev_priv,
					      struct drm_crtc *crtc)
{
	struct vmw_screen_object_unit *sou = vmw_crtc_to_sou(crtc);

	/* Only implicit units participate in the shared implicit fb. */
	BUG_ON(!sou->base.is_implicit);

	dev_priv->sou_priv->implicit_fb =
		vmw_framebuffer_to_vfb(sou->base.crtc.fb);
}
|
893
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
Normal file
893
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
Normal file
|
@ -0,0 +1,893 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include "vmwgfx_resource_priv.h"
|
||||
#include <ttm/ttm_placement.h>
|
||||
#include "svga3d_surfacedefs.h"
|
||||
|
||||
/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @base: The TTM base object handling user-space visibility.
 * @srf: The surface metadata.
 * @size: TTM accounting size for the surface.
 * @backup_handle: User-space handle of the surface's backup buffer.
 */
struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
	uint32_t size;
	uint32_t backup_handle;
};
||||
|
||||
/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face: Surface face the image belongs to.
 * @mip: Mip level within that face.
 * @bo_offset: Byte offset into the backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};
|
||||
|
||||
static void vmw_user_surface_free(struct vmw_resource *res);
|
||||
static struct vmw_resource *
|
||||
vmw_user_surface_base_to_res(struct ttm_base_object *base);
|
||||
static int vmw_legacy_srf_bind(struct vmw_resource *res,
|
||||
struct ttm_validate_buffer *val_buf);
|
||||
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
|
||||
bool readback,
|
||||
struct ttm_validate_buffer *val_buf);
|
||||
static int vmw_legacy_srf_create(struct vmw_resource *res);
|
||||
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
|
||||
|
||||
static const struct vmw_user_resource_conv user_surface_conv = {
|
||||
.object_type = VMW_RES_SURFACE,
|
||||
.base_obj_to_res = vmw_user_surface_base_to_res,
|
||||
.res_free = vmw_user_surface_free
|
||||
};
|
||||
|
||||
const struct vmw_user_resource_conv *user_surface_converter =
|
||||
&user_surface_conv;
|
||||
|
||||
|
||||
static uint64_t vmw_user_surface_size;
|
||||
|
||||
static const struct vmw_res_func vmw_legacy_surface_func = {
|
||||
.res_type = vmw_res_surface,
|
||||
.needs_backup = false,
|
||||
.may_evict = true,
|
||||
.type_name = "legacy surfaces",
|
||||
.backup_placement = &vmw_srf_placement,
|
||||
.create = &vmw_legacy_srf_create,
|
||||
.destroy = &vmw_legacy_srf_destroy,
|
||||
.bind = &vmw_legacy_srf_bind,
|
||||
.unbind = &vmw_legacy_srf_unbind
|
||||
};
|
||||
|
||||
/**
 * struct vmw_surface_dma - SVGA3D DMA command
 *
 * Header, body, a single copy box and the DMA suffix, laid out as the
 * device expects them in the FIFO. One such command is emitted per
 * mip-level image (see vmw_surface_dma_encode()).
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 *
 * Followed in the FIFO by one SVGA3dSize per mip-level image
 * (see vmw_surface_define_size()).
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};
|
||||
|
||||
|
||||
/**
|
||||
* vmw_surface_dma_size - Compute fifo size for a dma command.
|
||||
*
|
||||
* @srf: Pointer to a struct vmw_surface
|
||||
*
|
||||
* Computes the required size for a surface dma command for backup or
|
||||
* restoration of the surface represented by @srf.
|
||||
*/
|
||||
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
|
||||
{
|
||||
return srf->num_sizes * sizeof(struct vmw_surface_dma);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_surface_define_size - Compute fifo size for a surface define command.
|
||||
*
|
||||
* @srf: Pointer to a struct vmw_surface
|
||||
*
|
||||
* Computes the required size for a surface define command for the definition
|
||||
* of the surface represented by @srf.
|
||||
*/
|
||||
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
|
||||
{
|
||||
return sizeof(struct vmw_surface_define) + srf->num_sizes *
|
||||
sizeof(SVGA3dSize);
|
||||
}
|
||||
|
||||
|
||||
/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the
 * destruction of a hw surface. The command has no variable-length
 * payload, so this is just the fixed struct size.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}
|
||||
|
||||
/**
|
||||
* vmw_surface_destroy_encode - Encode a surface_destroy command.
|
||||
*
|
||||
* @id: The surface id
|
||||
* @cmd_space: Pointer to memory area in which the commands should be encoded.
|
||||
*/
|
||||
static void vmw_surface_destroy_encode(uint32_t id,
|
||||
void *cmd_space)
|
||||
{
|
||||
struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
|
||||
cmd_space;
|
||||
|
||||
cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
|
||||
cmd->header.size = sizeof(cmd->body);
|
||||
cmd->body.sid = id;
|
||||
}
|
||||
|
||||
/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 *
 * The fixed command body is followed directly by one SVGA3dSize per
 * mip-level image, which is why header.size exceeds sizeof(cmd->body).
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	/* NOTE(review): only the format field is passed through
	 * cpu_to_le32() while the surrounding fields are written in native
	 * order — confirm whether this is intentional or a leftover. */
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	/* The size array starts immediately after the fixed command. */
	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}
|
||||
|
||||
/**
|
||||
* vmw_surface_dma_encode - Encode a surface_dma command.
|
||||
*
|
||||
* @srf: Pointer to a struct vmw_surface object.
|
||||
* @cmd_space: Pointer to memory area in which the commands should be encoded.
|
||||
* @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
|
||||
* should be placed or read from.
|
||||
* @to_surface: Boolean whether to DMA to the surface or from the surface.
|
||||
*/
|
||||
static void vmw_surface_dma_encode(struct vmw_surface *srf,
|
||||
void *cmd_space,
|
||||
const SVGAGuestPtr *ptr,
|
||||
bool to_surface)
|
||||
{
|
||||
uint32_t i;
|
||||
struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
|
||||
const struct svga3d_surface_desc *desc =
|
||||
svga3dsurface_get_desc(srf->format);
|
||||
|
||||
for (i = 0; i < srf->num_sizes; ++i) {
|
||||
SVGA3dCmdHeader *header = &cmd->header;
|
||||
SVGA3dCmdSurfaceDMA *body = &cmd->body;
|
||||
SVGA3dCopyBox *cb = &cmd->cb;
|
||||
SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
|
||||
const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
|
||||
const struct drm_vmw_size *cur_size = &srf->sizes[i];
|
||||
|
||||
header->id = SVGA_3D_CMD_SURFACE_DMA;
|
||||
header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
|
||||
|
||||
body->guest.ptr = *ptr;
|
||||
body->guest.ptr.offset += cur_offset->bo_offset;
|
||||
body->guest.pitch = svga3dsurface_calculate_pitch(desc,
|
||||
cur_size);
|
||||
body->host.sid = srf->res.id;
|
||||
body->host.face = cur_offset->face;
|
||||
body->host.mipmap = cur_offset->mip;
|
||||
body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
|
||||
SVGA3D_READ_HOST_VRAM);
|
||||
cb->x = 0;
|
||||
cb->y = 0;
|
||||
cb->z = 0;
|
||||
cb->srcx = 0;
|
||||
cb->srcy = 0;
|
||||
cb->srcz = 0;
|
||||
cb->w = cur_size->width;
|
||||
cb->h = cur_size->height;
|
||||
cb->d = cur_size->depth;
|
||||
|
||||
suffix->suffixSize = sizeof(*suffix);
|
||||
suffix->maximumOffset =
|
||||
svga3dsurface_get_image_buffer_size(desc, cur_size,
|
||||
body->guest.pitch);
|
||||
suffix->flags.discard = 0;
|
||||
suffix->flags.unsynchronized = 0;
|
||||
suffix->flags.reserved = 0;
|
||||
++cmd;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* vmw_hw_surface_destroy - destroy a Device surface
|
||||
*
|
||||
* @res: Pointer to a struct vmw_resource embedded in a struct
|
||||
* vmw_surface.
|
||||
*
|
||||
* Destroys a the device surface associated with a struct vmw_surface if
|
||||
* any, and adjusts accounting and resource count accordingly.
|
||||
*/
|
||||
static void vmw_hw_surface_destroy(struct vmw_resource *res)
|
||||
{
|
||||
|
||||
struct vmw_private *dev_priv = res->dev_priv;
|
||||
struct vmw_surface *srf;
|
||||
void *cmd;
|
||||
|
||||
if (res->id != -1) {
|
||||
|
||||
cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
|
||||
if (unlikely(cmd == NULL)) {
|
||||
DRM_ERROR("Failed reserving FIFO space for surface "
|
||||
"destruction.\n");
|
||||
return;
|
||||
}
|
||||
|
||||
vmw_surface_destroy_encode(res->id, cmd);
|
||||
vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
|
||||
|
||||
/*
|
||||
* used_memory_size_atomic, or separate lock
|
||||
* to avoid taking dev_priv::cmdbuf_mutex in
|
||||
* the destroy path.
|
||||
*/
|
||||
|
||||
mutex_lock(&dev_priv->cmdbuf_mutex);
|
||||
srf = vmw_res_to_srf(res);
|
||||
dev_priv->used_memory_size -= res->backup_size;
|
||||
mutex_unlock(&dev_priv->cmdbuf_mutex);
|
||||
}
|
||||
vmw_3d_resource_dec(dev_priv, false);
|
||||
}
|
||||
|
||||
/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one and emit the
 * surface define command to the FIFO.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	/* Already has a hw id; nothing to do. */
	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	/* Would exceed the device's surface memory budget: caller should
	 * evict and retry. */
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define- commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);
	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
|
||||
|
||||
/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	/* Fencing proceeds even if fence creation failed (fence == NULL). */
	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
|
||||
|
||||
/**
|
||||
* vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
|
||||
* surface validation process.
|
||||
*
|
||||
* @res: Pointer to a struct vmw_res embedded in a struct
|
||||
* vmw_surface.
|
||||
* @val_buf: Pointer to a struct ttm_validate_buffer containing
|
||||
* information about the backup buffer.
|
||||
*
|
||||
* This function will copy backup data to the surface if the
|
||||
* backup buffer is dirty.
|
||||
*/
|
||||
static int vmw_legacy_srf_bind(struct vmw_resource *res,
|
||||
struct ttm_validate_buffer *val_buf)
|
||||
{
|
||||
if (!res->backup_dirty)
|
||||
return 0;
|
||||
|
||||
return vmw_legacy_srf_dma(res, val_buf, true);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
|
||||
* surface eviction process.
|
||||
*
|
||||
* @res: Pointer to a struct vmw_res embedded in a struct
|
||||
* vmw_surface.
|
||||
* @val_buf: Pointer to a struct ttm_validate_buffer containing
|
||||
* information about the backup buffer.
|
||||
*
|
||||
* This function will copy backup data from the surface.
|
||||
*/
|
||||
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
|
||||
bool readback,
|
||||
struct ttm_validate_buffer *val_buf)
|
||||
{
|
||||
if (unlikely(readback))
|
||||
return vmw_legacy_srf_dma(res, val_buf, false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 * resource eviction process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Emits the surface destroy command, adjusts the device surface memory
 * accounting, and releases the surface id.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	/* Must only be called on surfaces that have a hw id. */
	BUG_ON(res->id == -1);

	/*
	 * Encode the dma- and surface destroy commands.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}
|
||||
|
||||
|
||||
/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 * the object.
 *
 * Returns 0 on success or the error from vmw_resource_init(). On
 * failure the surface has already been freed via @res_free.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	/* Keep the 3D context alive for the lifetime of the surface. */
	(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		vmw_3d_resource_dec(dev_priv, false);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}
|
||||
|
||||
/**
|
||||
* vmw_user_surface_base_to_res - TTM base object to resource converter for
|
||||
* user visible surfaces
|
||||
*
|
||||
* @base: Pointer to a TTM base object
|
||||
*
|
||||
* Returns the struct vmw_resource embedded in a struct vmw_surface
|
||||
* for the user-visible object identified by the TTM base object @base.
|
||||
*/
|
||||
static struct vmw_resource *
|
||||
vmw_user_surface_base_to_res(struct ttm_base_object *base)
|
||||
{
|
||||
return &(container_of(base, struct vmw_user_surface, base)->srf.res);
|
||||
}
|
||||
|
||||
/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 *
 * Frees the surface's size/offset arrays and cursor snoop image, the
 * containing user surface object, and returns the TTM accounting memory.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	/* Save the accounting size before the object is freed. */
	uint32_t size = user_srf->size;

	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_base_object_kfree(user_srf, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
|
||||
|
||||
/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 * destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
|
||||
|
||||
/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 *
 * Drops the caller's usage reference on the surface; the surface is
 * destroyed when the last reference goes away.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
|
||||
|
||||
/**
|
||||
* vmw_user_surface_define_ioctl - Ioctl function implementing
|
||||
* the user surface define functionality.
|
||||
*
|
||||
* @dev: Pointer to a struct drm_device.
|
||||
* @data: Pointer to data copied from / to user-space.
|
||||
* @file_priv: Pointer to a drm file private structure.
|
||||
*/
|
||||
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
struct vmw_private *dev_priv = vmw_priv(dev);
|
||||
struct vmw_user_surface *user_srf;
|
||||
struct vmw_surface *srf;
|
||||
struct vmw_resource *res;
|
||||
struct vmw_resource *tmp;
|
||||
union drm_vmw_surface_create_arg *arg =
|
||||
(union drm_vmw_surface_create_arg *)data;
|
||||
struct drm_vmw_surface_create_req *req = &arg->req;
|
||||
struct drm_vmw_surface_arg *rep = &arg->rep;
|
||||
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
|
||||
struct drm_vmw_size __user *user_sizes;
|
||||
int ret;
|
||||
int i, j;
|
||||
uint32_t cur_bo_offset;
|
||||
struct drm_vmw_size *cur_size;
|
||||
struct vmw_surface_offset *cur_offset;
|
||||
uint32_t num_sizes;
|
||||
uint32_t size;
|
||||
struct vmw_master *vmaster = vmw_master(file_priv->master);
|
||||
const struct svga3d_surface_desc *desc;
|
||||
|
||||
if (unlikely(vmw_user_surface_size == 0))
|
||||
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
|
||||
128;
|
||||
|
||||
num_sizes = 0;
|
||||
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
|
||||
num_sizes += req->mip_levels[i];
|
||||
|
||||
if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
|
||||
DRM_VMW_MAX_MIP_LEVELS)
|
||||
return -EINVAL;
|
||||
|
||||
size = vmw_user_surface_size + 128 +
|
||||
ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
|
||||
ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
|
||||
|
||||
|
||||
desc = svga3dsurface_get_desc(req->format);
|
||||
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
|
||||
DRM_ERROR("Invalid surface format for surface creation.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = ttm_read_lock(&vmaster->lock, true);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
|
||||
size, false, true);
|
||||
if (unlikely(ret != 0)) {
|
||||
if (ret != -ERESTARTSYS)
|
||||
DRM_ERROR("Out of graphics memory for surface"
|
||||
" creation.\n");
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
|
||||
if (unlikely(user_srf == NULL)) {
|
||||
ret = -ENOMEM;
|
||||
goto out_no_user_srf;
|
||||
}
|
||||
|
||||
srf = &user_srf->srf;
|
||||
res = &srf->res;
|
||||
|
||||
srf->flags = req->flags;
|
||||
srf->format = req->format;
|
||||
srf->scanout = req->scanout;
|
||||
|
||||
memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
|
||||
srf->num_sizes = num_sizes;
|
||||
user_srf->size = size;
|
||||
|
||||
srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
|
||||
if (unlikely(srf->sizes == NULL)) {
|
||||
ret = -ENOMEM;
|
||||
goto out_no_sizes;
|
||||
}
|
||||
srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
|
||||
GFP_KERNEL);
|
||||
if (unlikely(srf->sizes == NULL)) {
|
||||
ret = -ENOMEM;
|
||||
goto out_no_offsets;
|
||||
}
|
||||
|
||||
user_sizes = (struct drm_vmw_size __user *)(unsigned long)
|
||||
req->size_addr;
|
||||
|
||||
ret = copy_from_user(srf->sizes, user_sizes,
|
||||
srf->num_sizes * sizeof(*srf->sizes));
|
||||
if (unlikely(ret != 0)) {
|
||||
ret = -EFAULT;
|
||||
goto out_no_copy;
|
||||
}
|
||||
|
||||
srf->base_size = *srf->sizes;
|
||||
srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
|
||||
srf->multisample_count = 1;
|
||||
|
||||
cur_bo_offset = 0;
|
||||
cur_offset = srf->offsets;
|
||||
cur_size = srf->sizes;
|
||||
|
||||
for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
|
||||
for (j = 0; j < srf->mip_levels[i]; ++j) {
|
||||
uint32_t stride = svga3dsurface_calculate_pitch
|
||||
(desc, cur_size);
|
||||
|
||||
cur_offset->face = i;
|
||||
cur_offset->mip = j;
|
||||
cur_offset->bo_offset = cur_bo_offset;
|
||||
cur_bo_offset += svga3dsurface_get_image_buffer_size
|
||||
(desc, cur_size, stride);
|
||||
++cur_offset;
|
||||
++cur_size;
|
||||
}
|
||||
}
|
||||
res->backup_size = cur_bo_offset;
|
||||
if (srf->scanout &&
|
||||
srf->num_sizes == 1 &&
|
||||
srf->sizes[0].width == 64 &&
|
||||
srf->sizes[0].height == 64 &&
|
||||
srf->format == SVGA3D_A8R8G8B8) {
|
||||
|
||||
srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
|
||||
/* clear the image */
|
||||
if (srf->snooper.image) {
|
||||
memset(srf->snooper.image, 0x00, 64 * 64 * 4);
|
||||
} else {
|
||||
DRM_ERROR("Failed to allocate cursor_image\n");
|
||||
ret = -ENOMEM;
|
||||
goto out_no_copy;
|
||||
}
|
||||
} else {
|
||||
srf->snooper.image = NULL;
|
||||
}
|
||||
srf->snooper.crtc = NULL;
|
||||
|
||||
user_srf->base.shareable = false;
|
||||
user_srf->base.tfile = NULL;
|
||||
|
||||
/**
|
||||
* From this point, the generic resource management functions
|
||||
* destroy the object on failure.
|
||||
*/
|
||||
|
||||
ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_unlock;
|
||||
|
||||
tmp = vmw_resource_reference(&srf->res);
|
||||
ret = ttm_base_object_init(tfile, &user_srf->base,
|
||||
req->shareable, VMW_RES_SURFACE,
|
||||
&vmw_user_surface_base_release, NULL);
|
||||
|
||||
if (unlikely(ret != 0)) {
|
||||
vmw_resource_unreference(&tmp);
|
||||
vmw_resource_unreference(&res);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
rep->sid = user_srf->base.hash.key;
|
||||
vmw_resource_unreference(&res);
|
||||
|
||||
ttm_read_unlock(&vmaster->lock);
|
||||
return 0;
|
||||
out_no_copy:
|
||||
kfree(srf->offsets);
|
||||
out_no_offsets:
|
||||
kfree(srf->sizes);
|
||||
out_no_sizes:
|
||||
ttm_base_object_kfree(user_srf, base);
|
||||
out_no_user_srf:
|
||||
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
|
||||
out_unlock:
|
||||
ttm_read_unlock(&vmaster->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 *
 * Looks up the surface by handle, adds a usage reference for the
 * calling file, and copies the surface description (flags, format,
 * mip levels, and optionally the size array) back to user-space.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	/* Copying the size array is optional; skip when no buffer given. */
	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}
|
98
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
Normal file
98
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
Normal file
|
@ -0,0 +1,98 @@
|
|||
/**************************************************************************
|
||||
*
|
||||
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include "vmwgfx_drv.h"
|
||||
|
||||
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
|
||||
{
|
||||
struct drm_file *file_priv;
|
||||
struct vmw_private *dev_priv;
|
||||
|
||||
if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
|
||||
DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
file_priv = filp->private_data;
|
||||
dev_priv = vmw_priv(file_priv->minor->dev);
|
||||
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
|
||||
}
|
||||
|
||||
static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
|
||||
{
|
||||
DRM_INFO("global init.\n");
|
||||
return ttm_mem_global_init(ref->object);
|
||||
}
|
||||
|
||||
static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
|
||||
{
|
||||
ttm_mem_global_release(ref->object);
|
||||
}
|
||||
|
||||
int vmw_ttm_global_init(struct vmw_private *dev_priv)
|
||||
{
|
||||
struct drm_global_reference *global_ref;
|
||||
int ret;
|
||||
|
||||
global_ref = &dev_priv->mem_global_ref;
|
||||
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
|
||||
global_ref->size = sizeof(struct ttm_mem_global);
|
||||
global_ref->init = &vmw_ttm_mem_global_init;
|
||||
global_ref->release = &vmw_ttm_mem_global_release;
|
||||
|
||||
ret = drm_global_item_ref(global_ref);
|
||||
if (unlikely(ret != 0)) {
|
||||
DRM_ERROR("Failed setting up TTM memory accounting.\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
dev_priv->bo_global_ref.mem_glob =
|
||||
dev_priv->mem_global_ref.object;
|
||||
global_ref = &dev_priv->bo_global_ref.ref;
|
||||
global_ref->global_type = DRM_GLOBAL_TTM_BO;
|
||||
global_ref->size = sizeof(struct ttm_bo_global);
|
||||
global_ref->init = &ttm_bo_global_init;
|
||||
global_ref->release = &ttm_bo_global_release;
|
||||
ret = drm_global_item_ref(global_ref);
|
||||
|
||||
if (unlikely(ret != 0)) {
|
||||
DRM_ERROR("Failed setting up TTM buffer objects.\n");
|
||||
goto out_no_bo;
|
||||
}
|
||||
|
||||
return 0;
|
||||
out_no_bo:
|
||||
drm_global_item_unref(&dev_priv->mem_global_ref);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void vmw_ttm_global_release(struct vmw_private *dev_priv)
|
||||
{
|
||||
drm_global_item_unref(&dev_priv->bo_global_ref.ref);
|
||||
drm_global_item_unref(&dev_priv->mem_global_ref);
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue