Skip to content

Commit 298799a

Browse files
committed
drm/vmwgfx: Fix gem refcounting and memory evictions
v2: Add the last part of the ref count fix which was spotted by Philipp Sieweck, where the ref count of cpu writers is off due to ERESTARTSYS or EBUSY during bo waits. The initial GEM port broke refcounting on shareable (prime) surfaces and memory evictions. The prime surfaces broke because the parent surfaces weren't increasing the ref count on GEM surfaces, which meant that the memory backing textures could have been deleted while the texture was still accessible. The evictions broke due to a typo: the code was supposed to exit if the passed buffers were not vmw_buffer_object, not if they were. They're tied because the evictions depend on having memory to actually evict. This fixes crashes with the XA state tracker, which is used for xrender acceleration on xf86-video-vmware, apps/tests which use a lot of memory (a good test being piglit's streaming-texture-leak) and desktops. Signed-off-by: Zack Rusin <zackr@vmware.com> Fixes: 8afa13a ("drm/vmwgfx: Implement DRIVER_GEM") Reported-by: Philipp Sieweck <psi@informatik.uni-kiel.de> Cc: <stable@vger.kernel.org> # v5.17+ Reviewed-by: Maaz Mombasawala <mombasawalam@vmware.com> Reviewed-by: Martin Krastev <krastevm@vmware.com> Link: https://patchwork.freedesktop.org/patch/msgid/20220420040328.1007409-1-zack@kde.org
1 parent 4dee8ee commit 298799a

File tree

3 files changed

+28
-30
lines changed

3 files changed

+28
-30
lines changed

drivers/gpu/drm/vmwgfx/vmwgfx_bo.c

Lines changed: 20 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,21 @@ vmw_buffer_object(struct ttm_buffer_object *bo)
 	return container_of(bo, struct vmw_buffer_object, base);
 }
 
+/**
+ * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
+ * @bo: ttm buffer object to be checked
+ *
+ * Uses destroy function associated with the object to determine if this is
+ * a &vmw_buffer_object.
+ *
+ * Returns:
+ * true if the object is of &vmw_buffer_object type, false if not.
+ */
+static bool bo_is_vmw(struct ttm_buffer_object *bo)
+{
+	return bo->destroy == &vmw_bo_bo_free ||
+	       bo->destroy == &vmw_gem_destroy;
+}
4964

5065
/**
5166
* vmw_bo_pin_in_placement - Validate a buffer to placement.
@@ -615,8 +630,9 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
 
 	ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
 	vmw_bo_unreference(&vbo);
-	if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
-		     ret != -EBUSY)) {
+	if (unlikely(ret != 0)) {
+		if (ret == -ERESTARTSYS || ret == -EBUSY)
+			return -EBUSY;
 		DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
 			  (unsigned int) arg->handle);
 		return ret;
@@ -798,7 +814,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
 {
 	/* Is @bo embedded in a struct vmw_buffer_object? */
-	if (vmw_bo_is_vmw_bo(bo))
+	if (!bo_is_vmw(bo))
 		return;

804820
/* Kill any cached kernel maps before swapout */
@@ -822,7 +838,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	struct vmw_buffer_object *vbo;
 
 	/* Make sure @bo is embedded in a struct vmw_buffer_object? */
-	if (vmw_bo_is_vmw_bo(bo))
+	if (!bo_is_vmw(bo))
 		return;
 
 	vbo = container_of(bo, struct vmw_buffer_object, base);
@@ -843,22 +859,3 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
 	if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
 		vmw_resource_unbind_list(vbo);
 }
-
-/**
- * vmw_bo_is_vmw_bo - check if the buffer object is a &vmw_buffer_object
- * @bo: buffer object to be checked
- *
- * Uses destroy function associated with the object to determine if this is
- * a &vmw_buffer_object.
- *
- * Returns:
- * true if the object is of &vmw_buffer_object type, false if not.
- */
-bool vmw_bo_is_vmw_bo(struct ttm_buffer_object *bo)
-{
-	if (bo->destroy == &vmw_bo_bo_free ||
-	    bo->destroy == &vmw_gem_destroy)
-		return true;
-
-	return false;
-}

drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

Lines changed: 2 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -998,13 +998,10 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
 		goto out_no_fman;
 	}
 
-	drm_vma_offset_manager_init(&dev_priv->vma_manager,
-				    DRM_FILE_PAGE_OFFSET_START,
-				    DRM_FILE_PAGE_OFFSET_SIZE);
 	ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
 			      dev_priv->drm.dev,
 			      dev_priv->drm.anon_inode->i_mapping,
-			      &dev_priv->vma_manager,
+			      dev_priv->drm.vma_offset_manager,
 			      dev_priv->map_mode == vmw_dma_alloc_coherent,
 			      false);
 	if (unlikely(ret != 0)) {
@@ -1174,7 +1171,6 @@ static void vmw_driver_unload(struct drm_device *dev)
 	vmw_devcaps_destroy(dev_priv);
 	vmw_vram_manager_fini(dev_priv);
 	ttm_device_fini(&dev_priv->bdev);
-	drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
 	vmw_release_device_late(dev_priv);
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -1398,7 +1394,7 @@ vmw_get_unmapped_area(struct file *file, unsigned long uaddr,
 	struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
 
 	return drm_get_unmapped_area(file, uaddr, len, pgoff, flags,
-				     &dev_priv->vma_manager);
+				     dev_priv->drm.vma_offset_manager);
 }
 
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,

drivers/gpu/drm/vmwgfx/vmwgfx_surface.c

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -683,6 +683,9 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 		container_of(base, struct vmw_user_surface, prime.base);
 	struct vmw_resource *res = &user_srf->srf.res;
 
+	if (base->shareable && res && res->backup)
+		drm_gem_object_put(&res->backup->base.base);
+
 	*p_base = NULL;
 	vmw_resource_unreference(&res);
 }
@@ -857,6 +860,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 			goto out_unlock;
 		}
 		vmw_bo_reference(res->backup);
+		drm_gem_object_get(&res->backup->base.base);
 	}
 
 	tmp = vmw_resource_reference(&srf->res);
@@ -1513,7 +1517,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 						 &res->backup);
 		if (ret == 0)
 			vmw_bo_reference(res->backup);
-
 	}
 
 	if (unlikely(ret != 0)) {
@@ -1561,6 +1564,8 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
 			drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
 		rep->buffer_size = res->backup->base.base.size;
 		rep->buffer_handle = backup_handle;
+		if (user_srf->prime.base.shareable)
+			drm_gem_object_get(&res->backup->base.base);
 	} else {
 		rep->buffer_map_handle = 0;
 		rep->buffer_size = 0;

0 commit comments

Comments
 (0)