/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
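
/* Order (log2 of the bucket count) of the hash table used to map resources
 * and buffers to their validation entries for a command submission. */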
#define VMW_RES_HT_ORDER 12
/**
* struct vmw_resource_relocation - Relocation info for resources
*
* @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource.
* @offset: Offset into the command buffer where the id that needs fixup is
* located. Granularity is 4 byte units.
*/
struct vmw_resource_relocation {
struct list_head head;
const struct vmw_resource *res;
unsigned long offset;
};
/**
* struct vmw_resource_val_node - Validation info for resources
*
* @head: List head for the software context's resource list.
* @hash: Hash entry for quick resource to val_node lookup.
* @res: Ref-counted pointer to the resource.
* @switch_backup: Boolean whether to switch backup buffer on unreserve.
* @new_backup: Refcounted pointer to the new backup buffer.
* @staged_bindings: If @res is a context, tracks bindings set up during
* the command batch. Otherwise NULL.
* @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
* @first_usage: Set to true the first time the resource is referenced in
* the command stream.
* @no_buffer_needed: Resources do not need to allocate buffer backup on
* reservation. The command stream will provide one.
*/
struct vmw_resource_val_node {
struct list_head head;
struct drm_hash_item hash;
struct vmw_resource *res;
struct vmw_dma_buffer *new_backup;
struct vmw_ctx_binding_state *staged_bindings;
unsigned long new_backup_offset;
bool first_usage;
bool no_buffer_needed;
};
/**
* struct vmw_cmd_entry - Describe a command for the verifier
*
* @user_allow: Whether allowed from the execbuf ioctl.
* @gb_disable: Whether disabled if guest-backed objects are available.
* @gb_enable: Whether enabled iff guest-backed objects are available.
*/
struct vmw_cmd_entry {
int (*func) (struct vmw_private *, struct vmw_sw_context *,
SVGA3dCmdHeader *);
bool user_allow;
bool gb_disable;
bool gb_enable;
};
#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
(_gb_disable), (_gb_enable)}
/**
* vmw_resource_list_unreserve - unreserve resources previously reserved for
* command submission.
*
* @list: list of resources to unreserve.
* @backoff: Whether command submission failed.
*/
static void vmw_resource_list_unreserve(struct list_head *list,
bool backoff)
{
struct vmw_resource_val_node *val;
list_for_each_entry(val, list, head) {
struct vmw_resource *res = val->res;
struct vmw_dma_buffer *new_backup =
backoff ? NULL : val->new_backup;
/*
* Transfer staged context bindings to the
* persistent context binding tracker.
*/
if (unlikely(val->staged_bindings)) {
if (!backoff) {
vmw_context_binding_state_transfer
(val->res, val->staged_bindings);
}
kfree(val->staged_bindings);
val->staged_bindings = NULL;
}
vmw_resource_unreserve(res, new_backup,
val->new_backup_offset);
vmw_dmabuf_unreference(&val->new_backup);
}
}
/**
* vmw_resource_val_add - Add a resource to the software context's
* resource list if it's not already on it.
*
* @sw_context: Pointer to the software context.
* @res: Pointer to the resource.
* @p_node: On successful return points to a valid pointer to a
* struct vmw_resource_val_node, if non-NULL on entry.
*/
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *res,
struct vmw_resource_val_node **p_node)
{
struct vmw_resource_val_node *node;
struct drm_hash_item *hash;
int ret;
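/*
 * Look the resource up in the software context's hash table first;
 * a hit means it is already on the validation list, and only the
 * first_usage flag needs updating.
 */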
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
&hash) == 0)) {
node = container_of(hash, struct vmw_resource_val_node, hash);
node->first_usage = false;
if (unlikely(p_node != NULL))
*p_node = node;
return 0;
}
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (unlikely(node == NULL)) {
DRM_ERROR("Failed to allocate a resource validation "
"entry.\n");
return -ENOMEM;
}
node->hash.key = (unsigned long) res;
ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a resource validation "
"entry.\n");
kfree(node);
return ret;
}
list_add_tail(&node->head, &sw_context->resource_list);
node->res = vmw_resource_reference(res);
node->first_usage = true;
if (unlikely(p_node != NULL))
*p_node = node;
return 0;
}
/**
* vmw_resource_context_res_add - Put resources previously bound to a context on
* the validation list
*
* @dev_priv: Pointer to a device private structure
* @sw_context: Pointer to a software context used for this command submission
* @ctx: Pointer to the context resource
*
* This function puts all resources that were previously bound to @ctx on
* the resource validation list. This is part of the context state reemission
*/
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
struct vmw_resource *ctx)
{
struct list_head *binding_list;
struct vmw_ctx_binding *entry;
int ret = 0;
struct vmw_resource *res;
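/*
 * Walk the context's binding list under the binding mutex and add each
 * bound resource to the validation list, skipping resources that are
 * already being destroyed.
 */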
mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx);
list_for_each_entry(entry, binding_list, ctx_list) {
res = vmw_resource_reference_unless_doomed(entry->bi.res);
if (unlikely(res == NULL))
continue;
ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
break;
}
mutex_unlock(&dev_priv->binding_mutex);
return ret;
}
/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
* @list: Pointer to head of relocation list.
* @res: The resource.
* @offset: Offset into the command buffer currently being parsed where the
* id that needs fixup is located. Granularity is 4 bytes.
*/
static int vmw_resource_relocation_add(struct list_head *list,
const struct vmw_resource *res,
unsigned long offset)
{
struct vmw_resource_relocation *rel;
rel = kmalloc(sizeof(*rel), GFP_KERNEL);
if (unlikely(rel == NULL)) {
DRM_ERROR("Failed to allocate a resource relocation.\n");
return -ENOMEM;
}
rel->res = res;
rel->offset = offset;
list_add_tail(&rel->head, list);
return 0;
}
/**
* vmw_resource_relocations_free - Free all relocations on a list
*
* @list: Pointer to the head of the relocation list.
*/
static void vmw_resource_relocations_free(struct list_head *list)
{
struct vmw_resource_relocation *rel, *n;
list_for_each_entry_safe(rel, n, list, head) {
list_del(&rel->head);
kfree(rel);
}
}
/**
* vmw_resource_relocations_apply - Apply all relocations on a list
*
* @cb: Pointer to the start of the command buffer being patched. This need
* not be the same buffer as the one being parsed when the relocation
* list was built, but the contents must be the same modulo the
* resource ids.
* @list: Pointer to the head of the relocation list.
*/
static void vmw_resource_relocations_apply(uint32_t *cb,
struct list_head *list)
{
struct vmw_resource_relocation *rel;
list_for_each_entry(rel, list, head) {
if (likely(rel->res != NULL))
cb[rel->offset] = rel->res->id;
else
cb[rel->offset] = SVGA_3D_CMD_NOP;
}
}
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}
static int vmw_cmd_ok(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
return 0;
}
/**
* vmw_bo_to_validate_list - add a bo to a validate list
*
* @sw_context: The software context used for this command submission batch.
* @bo: The buffer object to add.
* @validate_as_mob: Validate this buffer as a MOB.
* @p_val_node: If non-NULL, will be updated with the validate node number
* on return.
*
* Returns -EINVAL if the limit of number of buffer objects per command
* submission is reached.
*/
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
struct ttm_buffer_object *bo,
bool validate_as_mob,
uint32_t *p_val_node)
{
uint32_t val_node;
struct vmw_validate_buffer *vval_buf;
struct ttm_validate_buffer *val_buf;
struct drm_hash_item *hash;
int ret;
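/*
 * Buffers are tracked in the software context's hash table as well, so a
 * buffer object that is already on the validation list is reused rather
 * than added a second time.
 */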
if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
&hash) == 0)) {
vval_buf = container_of(hash, struct vmw_validate_buffer,
hash);
if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
DRM_ERROR("Inconsistent buffer usage.\n");
return -EINVAL;
}
val_buf = &vval_buf->base;
val_node = vval_buf - sw_context->val_bufs;
} else {
val_node = sw_context->cur_val_buf;
if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
DRM_ERROR("Max number of DMA buffers per submission "
"exceeded.\n");
return -EINVAL;
}
vval_buf = &sw_context->val_bufs[val_node];
vval_buf->hash.key = (unsigned long) bo;
ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to initialize a buffer validation "
"entry.\n");
return ret;
}
++sw_context->cur_val_buf;
val_buf = &vval_buf->base;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
vval_buf->validate_as_mob = validate_as_mob;
}
sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;
if (p_val_node)
*p_val_node = val_node;
return 0;
}
/**
* vmw_resources_reserve - Reserve all resources on the sw_context's
* resource list.
*
* @sw_context: Pointer to the software context.
*
* Note that since vmware's command submission currently is protected by
* the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
* since only a single thread at once will attempt this.
*/
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
ret = vmw_resource_reserve(res, val->no_buffer_needed);
if (unlikely(ret != 0))
return ret;
if (res->backup) {
struct ttm_buffer_object *bo = &res->backup->base;
ret = vmw_bo_to_validate_list
(sw_context, bo,
vmw_resource_needs_backup(res), NULL);
if (unlikely(ret != 0))
return ret;
}
}
return 0;
}
/**
* vmw_resources_validate - Validate all resources on the sw_context's
* resource list.
*
* @sw_context: Pointer to the software context.
*
* Before this function is called, all resource backup buffers must have
* been validated.
*/
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
struct vmw_resource *res = val->res;
ret = vmw_resource_validate(res);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to validate resource.\n");
return ret;
}
}
return 0;
}
/**
* vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visible type specific information.
* @id: user-space resource id handle.
* @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
* @p_val: Pointer to pointer to resource validation node. Populated
* on exit.
*/
static int
vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter,
uint32_t id,
uint32_t *id_loc,
struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
struct vmw_resource *res;
struct vmw_resource_val_node *node;
int ret;
if (id == SVGA3D_INVALID_ID) {
if (p_val)
*p_val = NULL;
if (res_type == vmw_res_context) {
DRM_ERROR("Illegal context invalid id.\n");
return -EINVAL;
}

return 0;
}

/*
* Fastpath in case of repeated commands referencing the same
* resource
*/

if (likely(rcache->valid && id == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
if (p_val)
*p_val = rcache->node;
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
id_loc - sw_context->buf_start);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
sw_context->fp->tfile,
id,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
(unsigned) id);
return ret;
}
rcache->valid = true;
rcache->res = res;
rcache->handle = id;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
id_loc - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_no_reloc;
ret = vmw_resource_val_add(sw_context, res, &node);
if (unlikely(ret != 0))
goto out_no_reloc;
rcache->node = node;
if (p_val)
*p_val = node;
if (dev_priv->has_mob && node->first_usage &&
res_type == vmw_res_context) {
ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
if (unlikely(ret != 0))
goto out_no_reloc;
node->staged_bindings =
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
if (node->staged_bindings == NULL) {
DRM_ERROR("Failed to allocate context binding "
"information.\n");
goto out_no_reloc;
}
INIT_LIST_HEAD(&node->staged_bindings->list);
}
vmw_resource_unreference(&res);
return 0;
out_no_reloc:
BUG_ON(sw_context->error_resource != NULL);
sw_context->error_resource = res;
return ret;
}
/**
* vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visible type specific information.
* @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
* @p_val: Pointer to pointer to resource validation node. Populated
* on exit.
*/
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter,
uint32_t *id_loc,
struct vmw_resource_val_node **p_val)
{
return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
converter, *id_loc, id_loc, p_val);
}
/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
*
* @sw_context: Pointer to the software context.
*
* Rebind context binding points that have been scrubbed because of eviction.
*/
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
if (likely(!val->staged_bindings))
continue;
ret = vmw_context_rebind_all(val->res);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
}
return 0;
}
/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @header: A command header with an embedded user-space context handle.
*
* Convenience function: Call vmw_cmd_res_check with the user-space context
* handle embedded in @header.
*/
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
} *cmd;
cmd = container_of(header, struct vmw_cid_cmd, header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->cid, NULL);
}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
struct vmw_resource_val_node *ctx_node;
struct vmw_resource_val_node *res_node;
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
&ctx_node);
if (unlikely(ret != 0))
return ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.target.sid, &res_node);
if (unlikely(ret != 0))
return ret;
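/*
 * With guest-backed contexts, record the render target as a context
 * binding so it can be tracked and re-established after eviction.
 */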
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo bi;
bi.ctx = ctx_node->res;
bi.res = res_node ? res_node->res : NULL;
bi.bt = vmw_ctx_binding_rt;
bi.i1.rt_type = cmd->body.type;
return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}
return 0;
}
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceCopy body;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest.sid, NULL);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceStretchBlt body;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.dest.sid, NULL);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdBlitSurfaceToScreen body;
} *cmd;
cmd = container_of(header, struct vmw_sid_cmd, header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter,
&cmd->body.srcImage.sid, NULL);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdPresent body;
} *cmd;
cmd = container_of(header, struct vmw_sid_cmd, header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
user_surface_converter, &cmd->body.sid,
NULL);
}
/**
* vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
*
* @dev_priv: The device private structure.
* @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission.
*
* This function checks whether @new_query_bo is suitable for holding
* query results, and if another buffer currently is pinned for query
* results. If so, the function prepares the state of @sw_context for
* switching pinned buffers after successful submission of the current
* command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
struct ttm_buffer_object *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context];
int ret;
BUG_ON(!ctx_entry->valid);
sw_context->last_query_ctx = ctx_entry->res;
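/*
 * Only prepare a switch if the command batch uses a different query
 * buffer than the one currently pinned by the device.
 */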
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
if (unlikely(new_query_bo->num_pages > 4)) {
DRM_ERROR("Query buffer too large.\n");
return -EINVAL;
}
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
ret = vmw_bo_to_validate_list(sw_context,
sw_context->cur_query_bo,
dev_priv->has_mob, NULL);
if (unlikely(ret != 0))
return ret;
}
sw_context->cur_query_bo = new_query_bo;
ret = vmw_bo_to_validate_list(sw_context,
dev_priv->dummy_query_bo,
dev_priv->has_mob, NULL);
if (unlikely(ret != 0))
return ret;
}
return 0;
}
/**
* vmw_query_bo_switch_commit - Finalize switching pinned query buffer
*
* @dev_priv: The device private structure.
* @sw_context: The software context used for this command submission batch.
*
* This function will check if we're switching query buffers, and will then,
* issue a dummy occlusion query wait used as a query barrier. When the fence
* object following that query wait has signaled, we are sure that all
* preceding queries have finished, and the old query buffer can be unpinned.
* However, since both the new query buffer and the old one are fenced with
* that fence, we can do an asynchronous unpin now, and be sure that the
* old query buffer won't be moved until the fence has signaled.
*
* As mentioned above, both the new - and old query buffers need to be fenced
* using a sequence emitted *after* calling this function.
*/
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
/*
* The validate list should still hold references to all
* contexts here.
*/
if (sw_context->needs_post_query_barrier) {
struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context];
struct vmw_resource *ctx;
int ret;
BUG_ON(!ctx_entry->valid);
ctx = ctx_entry->res;
ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
if (unlikely(ret != 0))
DRM_ERROR("Out of fifo space for dummy query.\n");
}
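/*
 * Unpin the previously pinned query buffer and, unless a query barrier
 * is still pending, pin the new one together with the dummy query
 * buffer.
 */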
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
if (dev_priv->pinned_bo) {
vmw_bo_pin(dev_priv->pinned_bo, false);
ttm_bo_unref(&dev_priv->pinned_bo);
}
if (!sw_context->needs_post_query_barrier) {
vmw_bo_pin(sw_context->cur_query_bo, true);
/*
* We pin also the dummy_query_bo buffer so that we
* don't need to validate it when emitting
* dummy queries in context destroy paths.
*/
vmw_bo_pin(dev_priv->dummy_query_bo, true);
dev_priv->dummy_query_bo_pinned = true;
BUG_ON(sw_context->last_query_ctx == NULL);
dev_priv->query_cid = sw_context->last_query_ctx->id;
dev_priv->query_cid_valid = true;
dev_priv->pinned_bo =
ttm_bo_reference(sw_context->cur_query_bo);
}
}
}
/**
* vmw_translate_mob_ptr - Prepare to translate a user-space buffer
* handle to a MOB id.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation.
* @id: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry
* a reference-counted pointer to the DMA buffer identified by the
* user-space handle in @id.
*
* This function saves information needed to translate a user-space buffer
* handle to a MOB id. The translation does not take place immediately, but
* during a call to vmw_apply_relocations(). This function builds a relocation
* list and a list of buffers to validate. The former needs to be freed using
* either vmw_apply_relocations() or vmw_free_relocations(). The latter
* needs to be freed using vmw_clear_validations.
*/
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
struct ttm_buffer_object *bo;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
return -EINVAL;
}
bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
" exceeded\n");
ret = -EINVAL;
goto out_no_reloc;
}
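/*
 * Record where in the command stream the MOB id lives; the actual id is
 * patched in later, when relocations are applied.
 */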
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->mob_loc = id;
reloc->location = NULL;
ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
*vmw_bo_p = vmw_bo;
return 0;
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
vmw_bo_p = NULL;
return ret;
}
/**
* vmw_translate_guest_ptr - Prepare to translate a user-space buffer
* handle to a valid SVGAGuestPtr
* @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation.
* @ptr: Pointer to the user-space handle to be translated.
* @vmw_bo_p: Points to a location that, on successful return will carry
* a reference-counted pointer to the DMA buffer identified by the
* user-space handle in @id.
* This function saves information needed to translate a user-space buffer
* handle to a valid SVGAGuestPtr. The translation does not take place
* immediately, but during a call to vmw_apply_relocations().
* This function builds a relocation list and a list of buffers to validate.
* The former needs to be freed using either vmw_apply_relocations() or
* vmw_free_relocations(). The latter needs to be freed using
* vmw_clear_validations.
*/
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
struct vmw_dma_buffer **vmw_bo_p)
{
struct vmw_dma_buffer *vmw_bo = NULL;
struct ttm_buffer_object *bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
}
bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number relocations per submission"
" exceeded\n");
ret = -EINVAL;
goto out_no_reloc;
}
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = ptr;
ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
if (unlikely(ret != 0))
goto out_no_reloc;
*vmw_bo_p = vmw_bo;
return 0;
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
vmw_bo_p = NULL;
return ret;
}
/**
* vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_begin_gb_query_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdBeginGBQuery q;
} *cmd;
cmd = container_of(header, struct vmw_begin_gb_query_cmd,
header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->q.cid,
NULL);
}
/**
* vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)