
Merge tag 'drm-misc-next-2019-01-07-1' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.1:

UAPI Changes:

Cross-subsystem Changes:
  - Turn dma-buf fence sequence numbers into 64 bit numbers

Core Changes:
  - Move to a common helper for the DP MST hotplug for radeon, i915 and
    amdgpu
  - i2c improvements for drm_dp_mst
  - Removal of drm_syncobj_cb
  - Introduction of a helper to create and attach the TV margin properties

Driver Changes:
  - Improve cache flushes for v3d
  - Reflection support for vc4
  - HDMI overscan support for vc4
  - Add implicit fencing support for rockchip and sun4i
  - Switch to generic fbdev emulation for virtio

Signed-off-by: Dave Airlie <airlied@redhat.com>

[airlied: applied amdgpu merge fixup]
From: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190107180333.amklwycudbsub3s5@flea
Dave Airlie 2019-01-10 05:53:51 +10:00
commit 8c1a765bc6
69 changed files with 581 additions and 581 deletions


@@ -354,9 +354,6 @@ KMS cleanups
 Some of these date from the very introduction of KMS in 2008 ...
 
-- drm_mode_config.crtc_idr is misnamed, since it contains all KMS object. Should
-  be renamed to drm_mode_config.object_idr.
-
 - drm_display_mode doesn't need to be derived from drm_mode_object. That's
   leftovers from older (never merged into upstream) KMS designs where modes
   where set using their ID, including support to add/remove modes.


@@ -4873,6 +4873,7 @@ DRM DRIVER FOR QXL VIRTUAL GPU
 M:	Dave Airlie <airlied@redhat.com>
 M:	Gerd Hoffmann <kraxel@redhat.com>
 L:	virtualization@lists.linux-foundation.org
+L:	spice-devel@lists.freedesktop.org
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 S:	Maintained
 F:	drivers/gpu/drm/qxl/
@@ -4986,7 +4987,6 @@ F:	Documentation/devicetree/bindings/display/atmel/
 T:	git git://anongit.freedesktop.org/drm/drm-misc
 
 DRM DRIVERS FOR BRIDGE CHIPS
-M:	Archit Taneja <architt@codeaurora.org>
 M:	Andrzej Hajda <a.hajda@samsung.com>
 R:	Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
 S:	Maintained


@@ -1093,17 +1093,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 	return 0;
 }
 
-static int dma_buf_debug_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, dma_buf_debug_show, NULL);
-}
-
-static const struct file_operations dma_buf_debug_fops = {
-	.open = dma_buf_debug_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
 
 static struct dentry *dma_buf_debugfs_dir;
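
The conversion above leans on the DEFINE_SHOW_ATTRIBUTE() macro from <linux/seq_file.h>, which generates the open callback and file_operations boilerplate from a *_show() function. Roughly what it expands to here (a simplified sketch, not the verbatim kernel definition):

	static int dma_buf_debug_open(struct inode *inode, struct file *file)
	{
		return single_open(file, dma_buf_debug_show, inode->i_private);
	}

	static const struct file_operations dma_buf_debug_fops = {
		.owner   = THIS_MODULE,
		.open    = dma_buf_debug_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

The one visible difference from the removed code is that single_open() now receives inode->i_private instead of a literal NULL, which should be equivalent for a debugfs file created without private data.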


@@ -649,7 +649,7 @@ EXPORT_SYMBOL(dma_fence_wait_any_timeout);
  */
 void
 dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
-	       spinlock_t *lock, u64 context, unsigned seqno)
+	       spinlock_t *lock, u64 context, u64 seqno)
 {
 	BUG_ON(!lock);
 	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
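
For fence providers, the visible change is the widened last parameter. A minimal sketch of a fence initialized against a 64-bit timeline counter (struct my_fence and my_fence_ops are illustrative, not from this series):

	struct my_fence {
		struct dma_fence base;
		spinlock_t lock;
	};

	static void my_fence_start(struct my_fence *f, u64 context,
				   atomic64_t *timeline)
	{
		spin_lock_init(&f->lock);
		/* seqno is u64 now, so a 64-bit counter passes straight through */
		dma_fence_init(&f->base, &my_fence_ops, &f->lock, context,
			       atomic64_inc_return(timeline));
	}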


@@ -172,7 +172,7 @@ static bool timeline_fence_enable_signaling(struct dma_fence *fence)
 static void timeline_fence_value_str(struct dma_fence *fence,
 				     char *str, int size)
 {
-	snprintf(str, size, "%d", fence->seqno);
+	snprintf(str, size, "%lld", fence->seqno);
 }
 
 static void timeline_fence_timeline_value_str(struct dma_fence *fence,


@@ -147,7 +147,7 @@ static void sync_print_sync_file(struct seq_file *s,
 	}
 }
 
-static int sync_debugfs_show(struct seq_file *s, void *unused)
+static int sync_info_debugfs_show(struct seq_file *s, void *unused)
 {
 	struct list_head *pos;
 
@@ -178,17 +178,7 @@ static int sync_debugfs_show(struct seq_file *s, void *unused)
 	return 0;
 }
 
-static int sync_info_debugfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, sync_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations sync_info_debugfs_fops = {
-	.open = sync_info_debugfs_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(sync_info_debugfs);
 
 static __init int sync_debugfs_init(void)
 {
@@ -218,7 +208,7 @@ void sync_dump(void)
 	};
 	int i;
 
-	sync_debugfs_show(&s, NULL);
+	sync_info_debugfs_show(&s, NULL);
 
 	for (i = 0; i < s.count; i += DUMP_CHUNK) {
 		if ((s.count - i) > DUMP_CHUNK) {


@@ -144,7 +144,7 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
 	} else {
 		struct dma_fence *fence = sync_file->fence;
 
-		snprintf(buf, len, "%s-%s%llu-%d",
+		snprintf(buf, len, "%s-%s%llu-%lld",
 			 fence->ops->get_driver_name(fence),
 			 fence->ops->get_timeline_name(fence),
 			 fence->context,
@@ -258,7 +258,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
 			i_b++;
 		} else {
-			if (pt_a->seqno - pt_b->seqno <= INT_MAX)
+			if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno))
 				add_fence(fences, &i, pt_a);
 			else
 				add_fence(fences, &i, pt_b);
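
The open-coded test (seqno difference within INT_MAX) was a wraparound trick that only made sense for 32-bit sequence numbers; with 64-bit seqnos the helper presumably reduces to a plain comparison. A sketch of what __dma_fence_is_later() would look like after the widening (reconstructed, not quoted from this pull):

	static inline bool __dma_fence_is_later(u64 f1, u64 f2)
	{
		/* 64-bit seqnos are not expected to wrap within a fence's
		 * lifetime, so a direct comparison suffices */
		return f1 > f2;
	}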


@@ -388,7 +388,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 			   soffset, eoffset, eoffset - soffset);
 
 		if (i->fence)
-			seq_printf(m, " protected by 0x%08x on context %llu",
+			seq_printf(m, " protected by 0x%016llx on context %llu",
 				   i->fence->seqno, i->fence->context);
 
 		seq_printf(m, "\n");


@@ -1692,7 +1692,8 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
 
 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
 
-	drm_atomic_private_obj_init(&adev->dm.atomic_obj,
+	drm_atomic_private_obj_init(adev->ddev,
+				    &adev->dm.atomic_obj,
 				    &state->base,
 				    &dm_atomic_state_funcs);


@@ -395,14 +395,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	drm_connector_put(connector);
 }
 
-static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
-{
-	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
-	struct drm_device *dev = master->base.dev;
-
-	drm_kms_helper_hotplug_event(dev);
-}
-
 static void dm_dp_mst_register_connector(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
@@ -419,7 +411,6 @@ static void dm_dp_mst_register_connector(struct drm_connector *connector)
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
 	.add_connector = dm_dp_add_mst_connector,
 	.destroy_connector = dm_dp_destroy_mst_connector,
-	.hotplug = dm_dp_mst_hotplug,
 	.register_connector = dm_dp_mst_register_connector
 };


@@ -191,7 +191,6 @@ static int astfb_create(struct drm_fb_helper *helper,
 	int size, ret;
 	void *sysram;
 	struct drm_gem_object *gobj = NULL;
-	struct ast_bo *bo = NULL;
 	mode_cmd.width = sizes->surface_width;
 	mode_cmd.height = sizes->surface_height;
 	mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7)/8);
@@ -206,7 +205,6 @@ static int astfb_create(struct drm_fb_helper *helper,
 		DRM_ERROR("failed to create fbcon backing object %d\n", ret);
 		return ret;
 	}
-	bo = gem_to_ast_bo(gobj);
 
 	sysram = vmalloc(size);
 	if (!sysram)


@@ -103,7 +103,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 	unsigned long pages;
 	u32 *pci_gart = NULL, page_base, gart_idx;
 	dma_addr_t bus_address = 0;
-	int i, j, ret = 0;
+	int i, j, ret = -ENOMEM;
 	int max_ati_pages, max_real_pages;
 
 	if (!entry) {
@@ -117,7 +117,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 		if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
 			DRM_ERROR("fail to set dma mask to 0x%Lx\n",
 				  (unsigned long long)gart_info->table_mask);
-			ret = 1;
+			ret = -EFAULT;
 			goto done;
 		}
@@ -160,6 +160,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 				drm_ati_pcigart_cleanup(dev, gart_info);
 				address = NULL;
 				bus_address = 0;
+				ret = -ENOMEM;
 				goto done;
 			}
 			page_base = (u32) entry->busaddr[i];
@@ -188,7 +189,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
 			page_base += ATI_PCIGART_PAGE_SIZE;
 		}
 	}
-	ret = 1;
+	ret = 0;
 
 #if defined(__i386__) || defined(__x86_64__)
 	wbinvd();
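
Note the return-convention flip here: the function used to return 1 on success, and now returns 0 on success with a negative errno on failure. Callers that tested for 1 need the inverted check; an illustrative fragment of the caller-side pattern (the actual caller updates presumably live in the r128/radeon files of this merge, such as the truncated r128 hunk at the end):

	ret = drm_ati_pcigart_init(dev, &dev_priv->gart_info);
	if (ret) {			/* was: if (ret != 1) */
		DRM_ERROR("failed to init PCI GART!\n");
		return ret;		/* negative errno propagates directly */
	}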


@@ -86,9 +86,16 @@ static int bochs_get_edid_block(void *data, u8 *buf,
 
 int bochs_hw_load_edid(struct bochs_device *bochs)
 {
+	u8 header[8];
+
 	if (!bochs->mmio)
 		return -1;
 
+	/* check header to detect whenever edid support is enabled in qemu */
+	bochs_get_edid_block(bochs, header, 0, ARRAY_SIZE(header));
+	if (drm_edid_header_is_valid(header) != 8)
+		return -1;
+
 	kfree(bochs->edid);
 	bochs->edid = drm_do_get_edid(&bochs->connector,
 				      bochs_get_edid_block, bochs);


@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * dw-hdmi-i2s-audio.c
  *
  * Copyright (c) 2017 Renesas Solutions Corp.
  * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 
 #include <drm/bridge/dw_hdmi.h>


@@ -698,6 +698,7 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
 
 /**
  * drm_atomic_private_obj_init - initialize private object
+ * @dev: DRM device this object will be attached to
  * @obj: private object
  * @state: initial private object state
  * @funcs: pointer to the struct of function pointers that identify the object
@@ -707,14 +708,18 @@ static void drm_atomic_plane_print_state(struct drm_printer *p,
  * driver private object that needs its own atomic state.
  */
 void
-drm_atomic_private_obj_init(struct drm_private_obj *obj,
+drm_atomic_private_obj_init(struct drm_device *dev,
+			    struct drm_private_obj *obj,
 			    struct drm_private_state *state,
 			    const struct drm_private_state_funcs *funcs)
 {
 	memset(obj, 0, sizeof(*obj));
 
+	drm_modeset_lock_init(&obj->lock);
+
 	obj->state = state;
 	obj->funcs = funcs;
+	list_add_tail(&obj->head, &dev->mode_config.privobj_list);
 }
 EXPORT_SYMBOL(drm_atomic_private_obj_init);
 
@@ -727,7 +732,9 @@ EXPORT_SYMBOL(drm_atomic_private_obj_init);
 void
 drm_atomic_private_obj_fini(struct drm_private_obj *obj)
 {
+	list_del(&obj->head);
 	obj->funcs->atomic_destroy_state(obj, obj->state);
+	drm_modeset_lock_fini(&obj->lock);
 }
 EXPORT_SYMBOL(drm_atomic_private_obj_fini);
 
@@ -737,8 +744,8 @@ EXPORT_SYMBOL(drm_atomic_private_obj_fini);
  * @obj: private object to get the state for
  *
 * This function returns the private object state for the given private object,
- * allocating the state if needed. It does not grab any locks as the caller is
- * expected to care of any required locking.
+ * allocating the state if needed. It will also grab the relevant private
+ * object lock to make sure that the state is consistent.
 *
 * RETURNS:
 *
@@ -748,7 +755,7 @@ struct drm_private_state *
 drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
 				 struct drm_private_obj *obj)
 {
-	int index, num_objs, i;
+	int index, num_objs, i, ret;
 	size_t size;
 	struct __drm_private_objs_state *arr;
 	struct drm_private_state *obj_state;
@@ -757,6 +764,10 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
 		if (obj == state->private_objs[i].ptr)
 			return state->private_objs[i].state;
 
+	ret = drm_modeset_lock(&obj->lock, state->acquire_ctx);
+	if (ret)
+		return ERR_PTR(ret);
+
 	num_objs = state->num_private_objs + 1;
 	size = sizeof(*state->private_objs) * num_objs;
 	arr = krealloc(state->private_objs, size, GFP_KERNEL);
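
With this change every private object carries its own modeset lock and is tracked on the device, so drivers pass the drm_device at init time. A minimal sketch of the updated call pattern (priv and my_private_state_funcs are illustrative names):

	/* driver init: the object is now registered on dev's privobj_list */
	drm_atomic_private_obj_init(dev, &priv->obj, &state->base,
				    &my_private_state_funcs);

	/* atomic check/commit: the getter now takes obj->lock through the
	 * acquire context, so callers must be prepared for -EDEADLK */
	new_state = drm_atomic_get_private_obj_state(state, &priv->obj);
	if (IS_ERR(new_state))
		return PTR_ERR(new_state);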


@@ -377,6 +377,17 @@ int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
 }
 EXPORT_SYMBOL(drm_legacy_addmap);
 
+struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
+					 unsigned int token)
+{
+	struct drm_map_list *_entry;
+
+	list_for_each_entry(_entry, &dev->maplist, head)
+		if (_entry->user_token == token)
+			return _entry->map;
+	return NULL;
+}
+EXPORT_SYMBOL(drm_legacy_findmap);
+
 /**
  * Ioctl to specify a range of memory that is available for mapping by a
  * non-root process.


@@ -1138,7 +1138,71 @@ void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
 EXPORT_SYMBOL(drm_hdmi_avi_infoframe_content_type);
 
 /**
- * drm_create_tv_properties - create TV specific connector properties
+ * drm_mode_attach_tv_margin_properties - attach TV connector margin properties
+ * @connector: DRM connector
+ *
+ * Called by a driver when it needs to attach TV margin props to a connector.
+ * Typically used on SDTV and HDMI connectors.
+ */
+void drm_connector_attach_tv_margin_properties(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_left_margin_property,
+				   0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_right_margin_property,
+				   0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_top_margin_property,
+				   0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.tv_bottom_margin_property,
+				   0);
+}
+EXPORT_SYMBOL(drm_connector_attach_tv_margin_properties);
+
+/**
+ * drm_mode_create_tv_margin_properties - create TV connector margin properties
+ * @dev: DRM device
+ *
+ * Called by a driver's HDMI connector initialization routine, this function
+ * creates the TV margin properties for a given device. No need to call this
+ * function for an SDTV connector, it's already called from
+ * drm_mode_create_tv_properties().
+ */
+int drm_mode_create_tv_margin_properties(struct drm_device *dev)
+{
+	if (dev->mode_config.tv_left_margin_property)
+		return 0;
+
+	dev->mode_config.tv_left_margin_property =
+		drm_property_create_range(dev, 0, "left margin", 0, 100);
+	if (!dev->mode_config.tv_left_margin_property)
+		return -ENOMEM;
+
+	dev->mode_config.tv_right_margin_property =
+		drm_property_create_range(dev, 0, "right margin", 0, 100);
+	if (!dev->mode_config.tv_right_margin_property)
+		return -ENOMEM;
+
+	dev->mode_config.tv_top_margin_property =
+		drm_property_create_range(dev, 0, "top margin", 0, 100);
+	if (!dev->mode_config.tv_top_margin_property)
+		return -ENOMEM;
+
+	dev->mode_config.tv_bottom_margin_property =
+		drm_property_create_range(dev, 0, "bottom margin", 0, 100);
+	if (!dev->mode_config.tv_bottom_margin_property)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_tv_margin_properties);
+
+/**
+ * drm_mode_create_tv_properties - create TV specific connector properties
  * @dev: DRM device
  * @num_modes: number of different TV formats (modes) supported
  * @modes: array of pointers to strings containing name of each format
@@ -1183,24 +1247,7 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
 	/*
 	 * Other, TV specific properties: margins & TV modes.
 	 */
-	dev->mode_config.tv_left_margin_property =
-		drm_property_create_range(dev, 0, "left margin", 0, 100);
-	if (!dev->mode_config.tv_left_margin_property)
-		goto nomem;
-
-	dev->mode_config.tv_right_margin_property =
-		drm_property_create_range(dev, 0, "right margin", 0, 100);
-	if (!dev->mode_config.tv_right_margin_property)
-		goto nomem;
-
-	dev->mode_config.tv_top_margin_property =
-		drm_property_create_range(dev, 0, "top margin", 0, 100);
-	if (!dev->mode_config.tv_top_margin_property)
-		goto nomem;
-
-	dev->mode_config.tv_bottom_margin_property =
-		drm_property_create_range(dev, 0, "bottom margin", 0, 100);
-	if (!dev->mode_config.tv_bottom_margin_property)
+	if (drm_mode_create_tv_margin_properties(dev))
 		goto nomem;
 
 	dev->mode_config.tv_mode_property =
@@ -2077,7 +2124,7 @@ EXPORT_SYMBOL(drm_mode_get_tile_group);
 * identifier for the tile group.
 *
 * RETURNS:
- * new tile group or error.
+ * new tile group or NULL.
 */
 struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
 						  char topology[8])
@@ -2087,7 +2134,7 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
 
 	tg = kzalloc(sizeof(*tg), GFP_KERNEL);
 	if (!tg)
-		return ERR_PTR(-ENOMEM);
+		return NULL;
 
 	kref_init(&tg->refcount);
 	memcpy(tg->group_data, topology, 8);
@@ -2099,7 +2146,7 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
 		tg->id = ret;
 	} else {
 		kfree(tg);
-		tg = ERR_PTR(ret);
+		tg = NULL;
 	}
 
 	mutex_unlock(&dev->mode_config.idr_mutex);
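
Taken together, the two new helpers let an HDMI connector pick up the same margin properties that SDTV connectors already get from drm_mode_create_tv_properties(); this is presumably how the vc4 HDMI overscan support in this pull consumes them. A sketch of the intended driver usage (function name illustrative):

	static int my_hdmi_connector_init(struct drm_device *dev,
					  struct drm_connector *connector)
	{
		int ret;

		/* idempotent: returns 0 if the properties already exist */
		ret = drm_mode_create_tv_margin_properties(dev);
		if (ret)
			return ret;

		drm_connector_attach_tv_margin_properties(connector);
		return 0;
	}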


@@ -361,23 +361,26 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
 {
 	struct drm_ctx_list *ctx_entry;
 	struct drm_ctx *ctx = data;
+	int tmp_handle;
 
 	if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
 	    !drm_core_check_feature(dev, DRIVER_LEGACY))
 		return -EOPNOTSUPP;
 
-	ctx->handle = drm_legacy_ctxbitmap_next(dev);
-	if (ctx->handle == DRM_KERNEL_CONTEXT) {
+	tmp_handle = drm_legacy_ctxbitmap_next(dev);
+	if (tmp_handle == DRM_KERNEL_CONTEXT) {
 		/* Skip kernel's context and get a new one. */
-		ctx->handle = drm_legacy_ctxbitmap_next(dev);
+		tmp_handle = drm_legacy_ctxbitmap_next(dev);
 	}
-	DRM_DEBUG("%d\n", ctx->handle);
-	if (ctx->handle < 0) {
+	DRM_DEBUG("%d\n", tmp_handle);
+	if (tmp_handle < 0) {
 		DRM_DEBUG("Not enough free contexts.\n");
 		/* Should this return -EBUSY instead? */
-		return -ENOMEM;
+		return tmp_handle;
 	}
 
+	ctx->handle = tmp_handle;
+
 	ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
 	if (!ctx_entry) {
 		DRM_DEBUG("out of memory\n");


@@ -33,6 +33,7 @@
 #include <drm/drm_fixed.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
 
 /**
  * DOC: dp mst helper
@@ -1639,7 +1640,7 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
 			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
 				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
 			}
-			(*mgr->cbs->hotplug)(mgr);
+			drm_kms_helper_hotplug_event(mgr->dev);
 		}
 	} else {
 		mstb->link_address_sent = false;
@@ -1878,41 +1879,48 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 	mutex_lock(&mgr->payload_lock);
 	for (i = 0; i < mgr->max_payloads; i++) {
+		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+		struct drm_dp_payload *payload = &mgr->payloads[i];
+
 		/* solve the current payloads - compare to the hw ones
 		   - update the hw view */
 		req_payload.start_slot = cur_slots;
-		if (mgr->proposed_vcpis[i]) {
-			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+		if (vcpi) {
+			port = container_of(vcpi, struct drm_dp_mst_port,
+					    vcpi);
 			port = drm_dp_get_validated_port_ref(mgr, port);
 			if (!port) {
 				mutex_unlock(&mgr->payload_lock);
 				return -EINVAL;
 			}
-			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
-			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
+			req_payload.num_slots = vcpi->num_slots;
+			req_payload.vcpi = vcpi->vcpi;
 		} else {
 			port = NULL;
 			req_payload.num_slots = 0;
 		}
 
-		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
-			mgr->payloads[i].start_slot = req_payload.start_slot;
-		}
+		payload->start_slot = req_payload.start_slot;
 		/* work out what is required to happen with this payload */
-		if (mgr->payloads[i].num_slots != req_payload.num_slots) {
+		if (payload->num_slots != req_payload.num_slots) {
 
 			/* need to push an update for this payload */
 			if (req_payload.num_slots) {
-				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
-				mgr->payloads[i].num_slots = req_payload.num_slots;
-				mgr->payloads[i].vcpi = req_payload.vcpi;
-			} else if (mgr->payloads[i].num_slots) {
-				mgr->payloads[i].num_slots = 0;
-				drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
-				req_payload.payload_state = mgr->payloads[i].payload_state;
-				mgr->payloads[i].start_slot = 0;
+				drm_dp_create_payload_step1(mgr, vcpi->vcpi,
+							    &req_payload);
+				payload->num_slots = req_payload.num_slots;
+				payload->vcpi = req_payload.vcpi;
+
+			} else if (payload->num_slots) {
+				payload->num_slots = 0;
+				drm_dp_destroy_payload_step1(mgr, port,
+							     payload->vcpi,
+							     payload);
+				req_payload.payload_state =
+					payload->payload_state;
+				payload->start_slot = 0;
 			}
-			mgr->payloads[i].payload_state = req_payload.payload_state;
+			payload->payload_state = req_payload.payload_state;
 		}
 		cur_slots += req_payload.num_slots;
@@ -1921,22 +1929,26 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
 	}
 
 	for (i = 0; i < mgr->max_payloads; i++) {
-		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
-			DRM_DEBUG_KMS("removing payload %d\n", i);
-			for (j = i; j < mgr->max_payloads - 1; j++) {
-				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
-				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
-				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
-					set_bit(j + 1, &mgr->payload_mask);
-				} else {
-					clear_bit(j + 1, &mgr->payload_mask);
-				}
-			}
-			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
-			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
-			clear_bit(mgr->max_payloads, &mgr->payload_mask);
+		if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
+			continue;
+
+		DRM_DEBUG_KMS("removing payload %d\n", i);
+		for (j = i; j < mgr->max_payloads - 1; j++) {
+			mgr->payloads[j] = mgr->payloads[j + 1];
+			mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
+
+			if (mgr->proposed_vcpis[j] &&
+			    mgr->proposed_vcpis[j]->num_slots) {
+				set_bit(j + 1, &mgr->payload_mask);
+			} else {
+				clear_bit(j + 1, &mgr->payload_mask);
+			}
 		}
+
+		memset(&mgr->payloads[mgr->max_payloads - 1], 0,
+		       sizeof(struct drm_dp_payload));
+		mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
+		clear_bit(mgr->max_payloads, &mgr->payload_mask);
 	}
 	mutex_unlock(&mgr->payload_lock);
@@ -2412,7 +2424,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
 			drm_dp_update_port(mstb, &msg.u.conn_stat);
 
 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
-			(*mgr->cbs->hotplug)(mgr);
+			drm_kms_helper_hotplug_event(mgr->dev);
 
 		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
 			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
@@ -3109,7 +3121,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
 		send_hotplug = true;
 	}
 	if (send_hotplug)
-		(*mgr->cbs->hotplug)(mgr);
+		drm_kms_helper_hotplug_event(mgr->dev);
 }
 
 static struct drm_private_state *
@@ -3220,7 +3232,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 	/* max. time slots - one slot for MTP header */
 	mst_state->avail_slots = 63;
 
-	drm_atomic_private_obj_init(&mgr->base,
+	drm_atomic_private_obj_init(dev, &mgr->base,
 				    &mst_state->base,
 				    &mst_state_funcs);
@@ -3234,6 +3246,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
 */
 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 {
+	drm_dp_mst_topology_mgr_set_mst(mgr, false);
 	flush_work(&mgr->work);
 	flush_work(&mgr->destroy_connector_work);
 	mutex_lock(&mgr->payload_lock);
@@ -3249,6 +3262,23 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
 
+static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
+{
+	int i;
+
+	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
+		return false;
+
+	for (i = 0; i < num - 1; i++) {
+		if (msgs[i].flags & I2C_M_RD ||
+		    msgs[i].len > 0xff)
+			return false;
+	}
+
+	return msgs[num - 1].flags & I2C_M_RD &&
+		msgs[num - 1].len <= 0xff;
+}
+
 /* I2C device */
 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
 			       int num)
@@ -3258,7 +3288,6 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
 	struct drm_dp_mst_branch *mstb;
 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
 	unsigned int i;
-	bool reading = false;
 	struct drm_dp_sideband_msg_req_body msg;
 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
 	int ret;
@@ -3267,12 +3296,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
 	if (!mstb)
 		return -EREMOTEIO;
 
-	/* construct i2c msg */
-	/* see if last msg is a read */
-	if (msgs[num - 1].flags & I2C_M_RD)
-		reading = true;
-
-	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
+	if (!remote_i2c_read_ok(msgs, num)) {
 		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
 		ret = -EIO;
 		goto out;
@@ -3286,6 +3310,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
+		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
 	}
 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
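
remote_i2c_read_ok() encodes the only transfer shape the DP_REMOTE_I2C_READ sideband message can express: up to DP_REMOTE_I2C_READ_MAX_TRANSACTIONS short write transactions followed by exactly one read of at most 255 bytes. A sketch of an i2c_msg array that passes the check (a typical EDID read at address 0x50; values illustrative):

	u8 offset = 0;
	u8 edid[128];
	struct i2c_msg msgs[] = {
		/* one write transaction: not I2C_M_RD, len <= 0xff */
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		/* the final message must be the single read */
		{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(edid), .buf = edid },
	};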


@@ -41,7 +41,6 @@
 #include "drm_crtc_internal.h"
 #include "drm_legacy.h"
 #include "drm_internal.h"
-#include "drm_crtc_internal.h"
 
 /*
  * drm_debug: Enable debug output.


@@ -26,6 +26,8 @@
 #define DRM_IF_MAJOR 1
 #define DRM_IF_MINOR 4
 
+#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
+
 struct drm_prime_file_private;
 struct dma_buf;


@@ -218,7 +218,7 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
 	idr_for_each_entry(leases, entry, object) {
 		error = 0;
-		if (!idr_find(&dev->mode_config.crtc_idr, object))
+		if (!idr_find(&dev->mode_config.object_idr, object))
 			error = -ENOENT;
 		else if (!_drm_lease_held_master(lessor, object))
 			error = -EACCES;
@@ -439,7 +439,7 @@ static int fill_object_idr(struct drm_device *dev,
 	/*
 	 * We're using an IDR to hold the set of leased
 	 * objects, but we don't need to point at the object's
-	 * data structure from the lease as the main crtc_idr
+	 * data structure from the lease as the main object_idr
 	 * will be used to actually find that. Instead, all we
 	 * really want is a 'leased/not-leased' result, for
 	 * which any non-NULL pointer will work fine.
@@ -687,7 +687,7 @@ int drm_mode_get_lease_ioctl(struct drm_device *dev,
 
 	if (lessee->lessor == NULL)
 		/* owner can use all objects */
-		object_idr = &lessee->dev->mode_config.crtc_idr;
+		object_idr = &lessee->dev->mode_config.object_idr;
 	else
 		/* lessee can only use allowed object */
 		object_idr = &lessee->leases;


@@ -393,7 +393,8 @@ void drm_mode_config_init(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev->mode_config.property_list);
 	INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
 	INIT_LIST_HEAD(&dev->mode_config.plane_list);
-	idr_init(&dev->mode_config.crtc_idr);
+	INIT_LIST_HEAD(&dev->mode_config.privobj_list);
+	idr_init(&dev->mode_config.object_idr);
 	idr_init(&dev->mode_config.tile_idr);
 	ida_init(&dev->mode_config.connector_ida);
 	spin_lock_init(&dev->mode_config.connector_list_lock);
@@ -496,7 +497,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
 	ida_destroy(&dev->mode_config.connector_ida);
 	idr_destroy(&dev->mode_config.tile_idr);
-	idr_destroy(&dev->mode_config.crtc_idr);
+	idr_destroy(&dev->mode_config.object_idr);
 	drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
 }
 EXPORT_SYMBOL(drm_mode_config_cleanup);


@@ -38,7 +38,7 @@ int __drm_mode_object_add(struct drm_device *dev, struct drm_mode_object *obj,
 	int ret;
 
 	mutex_lock(&dev->mode_config.idr_mutex);
-	ret = idr_alloc(&dev->mode_config.crtc_idr, register_obj ? obj : NULL,
+	ret = idr_alloc(&dev->mode_config.object_idr, register_obj ? obj : NULL,
 			1, 0, GFP_KERNEL);
 	if (ret >= 0) {
 		/*
@@ -79,7 +79,7 @@ void drm_mode_object_register(struct drm_device *dev,
 			      struct drm_mode_object *obj)
 {
 	mutex_lock(&dev->mode_config.idr_mutex);
-	idr_replace(&dev->mode_config.crtc_idr, obj, obj->id);
+	idr_replace(&dev->mode_config.object_idr, obj, obj->id);
 	mutex_unlock(&dev->mode_config.idr_mutex);
 }
 
@@ -99,7 +99,7 @@ void drm_mode_object_unregister(struct drm_device *dev,
 {
 	mutex_lock(&dev->mode_config.idr_mutex);
 	if (object->id) {
-		idr_remove(&dev->mode_config.crtc_idr, object->id);
+		idr_remove(&dev->mode_config.object_idr, object->id);
 		object->id = 0;
 	}
 	mutex_unlock(&dev->mode_config.idr_mutex);
@@ -131,7 +131,7 @@ struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
 	struct drm_mode_object *obj = NULL;
 
 	mutex_lock(&dev->mode_config.idr_mutex);
-	obj = idr_find(&dev->mode_config.crtc_idr, id);
+	obj = idr_find(&dev->mode_config.object_idr, id);
 	if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
 		obj = NULL;
 	if (obj && obj->id != id)
@@ -459,12 +459,13 @@ static int set_property_atomic(struct drm_mode_object *obj,
 	struct drm_modeset_acquire_ctx ctx;
 	int ret;
 
-	drm_modeset_acquire_init(&ctx, 0);
-
 	state = drm_atomic_state_alloc(dev);
 	if (!state)
 		return -ENOMEM;
+
+	drm_modeset_acquire_init(&ctx, 0);
 	state->acquire_ctx = &ctx;
+
 retry:
 	if (prop == state->dev->mode_config.dpms_property) {
 		if (obj->type != DRM_MODE_OBJECT_CONNECTOR) {


@@ -22,6 +22,7 @@
 */
 
 #include <drm/drmP.h>
+#include <drm/drm_atomic.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_modeset_lock.h>
 
@@ -394,6 +395,7 @@ EXPORT_SYMBOL(drm_modeset_unlock);
 int drm_modeset_lock_all_ctx(struct drm_device *dev,
 			     struct drm_modeset_acquire_ctx *ctx)
 {
+	struct drm_private_obj *privobj;
 	struct drm_crtc *crtc;
 	struct drm_plane *plane;
 	int ret;
@@ -414,6 +416,12 @@ int drm_modeset_lock_all_ctx(struct drm_device *dev,
 			return ret;
 	}
 
+	drm_for_each_privobj(privobj, dev) {
+		ret = drm_modeset_lock(&privobj->lock, ctx);
+		if (ret)
+			return ret;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(drm_modeset_lock_all_ctx);
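
drm_for_each_privobj() is the iterator over the per-device private-object list introduced in the drm_atomic.c hunks above; presumably something like the following sketch, based on the privobj_list added in this series:

	#define drm_for_each_privobj(privobj, dev) \
		list_for_each_entry(privobj, &(dev)->mode_config.privobj_list, head)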


@@ -56,6 +56,16 @@
 #include "drm_internal.h"
 #include <drm/drm_syncobj.h>
 
+struct syncobj_wait_entry {
+	struct list_head node;
+	struct task_struct *task;
+	struct dma_fence *fence;
+	struct dma_fence_cb fence_cb;
+};
+
+static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
+				      struct syncobj_wait_entry *wait);
+
 /**
  * drm_syncobj_find - lookup and reference a sync object.
  * @file_private: drm file private pointer
@@ -82,58 +92,33 @@ struct drm_syncobj *drm_syncobj_find(struct drm_file *file_private,
 }
 EXPORT_SYMBOL(drm_syncobj_find);
 
-static void drm_syncobj_add_callback_locked(struct drm_syncobj *syncobj,
-					    struct drm_syncobj_cb *cb,
-					    drm_syncobj_func_t func)
+static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
+				       struct syncobj_wait_entry *wait)
 {
-	cb->func = func;
-	list_add_tail(&cb->node, &syncobj->cb_list);
-}
-
-static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
-						 struct dma_fence **fence,
-						 struct drm_syncobj_cb *cb,
-						 drm_syncobj_func_t func)
-{
-	int ret;
-
-	*fence = drm_syncobj_fence_get(syncobj);
-	if (*fence)
-		return 1;
+	if (wait->fence)
+		return;
 
 	spin_lock(&syncobj->lock);
 	/* We've already tried once to get a fence and failed.  Now that we
 	 * have the lock, try one more time just to be sure we don't add a
 	 * callback when a fence has already been set.
 	 */
-	if (syncobj->fence) {
-		*fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
-								 lockdep_is_held(&syncobj->lock)));
-		ret = 1;
-	} else {
-		*fence = NULL;
-		drm_syncobj_add_callback_locked(syncobj, cb, func);
-		ret = 0;
-	}
-	spin_unlock(&syncobj->lock);
-
-	return ret;
-}
-
-void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
-			      struct drm_syncobj_cb *cb,
-			      drm_syncobj_func_t func)
-{
-	spin_lock(&syncobj->lock);
-	drm_syncobj_add_callback_locked(syncobj, cb, func);
+	if (syncobj->fence)
+		wait->fence = dma_fence_get(
+			rcu_dereference_protected(syncobj->fence, 1));
+	else
+		list_add_tail(&wait->node, &syncobj->cb_list);
 	spin_unlock(&syncobj->lock);
 }
 
-void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
-				 struct drm_syncobj_cb *cb)
+static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
+				    struct syncobj_wait_entry *wait)
 {
+	if (!wait->node.next)
+		return;
+
 	spin_lock(&syncobj->lock);
-	list_del_init(&cb->node);
+	list_del_init(&wait->node);
 	spin_unlock(&syncobj->lock);
 }
 
@@ -148,7 +133,7 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 			       struct dma_fence *fence)
 {
 	struct dma_fence *old_fence;
-	struct drm_syncobj_cb *cur, *tmp;
+	struct syncobj_wait_entry *cur, *tmp;
 
 	if (fence)
 		dma_fence_get(fence);
@@ -162,7 +147,7 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
 	if (fence != old_fence) {
 		list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
 			list_del_init(&cur->node);
-			cur->func(syncobj, cur);
+			syncobj_wait_syncobj_func(syncobj, cur);
 		}
 	}
 
@@ -608,13 +593,6 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
 					&args->handle);
 }
 
-struct syncobj_wait_entry {
-	struct task_struct *task;
-	struct dma_fence *fence;
-	struct dma_fence_cb fence_cb;
-	struct drm_syncobj_cb syncobj_cb;
-};
-
 static void syncobj_wait_fence_func(struct dma_fence *fence,
 				    struct dma_fence_cb *cb)
 {
@@ -625,11 +603,8 @@ static void syncobj_wait_fence_func(struct dma_fence *fence,
 }
 
 static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
-				      struct drm_syncobj_cb *cb)
+				      struct syncobj_wait_entry *wait)
 {
-	struct syncobj_wait_entry *wait =
-		container_of(cb, struct syncobj_wait_entry, syncobj_cb);
-
 	/* This happens inside the syncobj lock */
 	wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
 							      lockdep_is_held(&syncobj->lock)));
@@ -688,12 +663,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
 	 */
 
 	if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
-		for (i = 0; i < count; ++i) {
-			drm_syncobj_fence_get_or_add_callback(syncobjs[i],
-							      &entries[i].fence,
-							      &entries[i].syncobj_cb,
-							      syncobj_wait_syncobj_func);
-		}
+		for (i = 0; i < count; ++i)
+			drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
 	}
 
 	do {
@@ -742,9 +713,7 @@ done_waiting:
 
 cleanup_entries:
 	for (i = 0; i < count; ++i) {
-		if (entries[i].syncobj_cb.func)
-			drm_syncobj_remove_callback(syncobjs[i],
-						    &entries[i].syncobj_cb);
+		drm_syncobj_remove_wait(syncobjs[i], &entries[i]);
 		if (entries[i].fence_cb.func)
 			dma_fence_remove_callback(entries[i].fence,
 						  &entries[i].fence_cb);
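
Net effect of the refactor: the wait path embeds its list node directly in syncobj_wait_entry instead of going through the drm_syncobj_cb function-pointer indirection, whose only user was syncobj_wait_syncobj_func(). The per-entry life cycle in the wait ioctl now reduces to roughly this (paraphrased from the hunks above, not a verbatim excerpt):

	struct syncobj_wait_entry entry = { .task = current };

	drm_syncobj_fence_add_wait(syncobj, &entry);	/* grab fence or enqueue */
	/* ... wait loop consumes entry.fence ... */
	drm_syncobj_remove_wait(syncobj, &entry);	/* no-op if never queued */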


@@ -449,7 +449,7 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence,
 	const char *type, struct seq_file *m)
 {
 	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
-		seq_printf(m, "\t%9s: %s %s seq %u\n",
+		seq_printf(m, "\t%9s: %s %s seq %llu\n",
 			   type,
 			   fence->ops->get_driver_name(fence),
 			   fence->ops->get_timeline_name(fence),


@@ -3184,7 +3184,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 	 */
 
 	if (i915_request_completed(request)) {
-		GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n",
+		GEM_TRACE("%s pardoned global=%d (fence %llx:%lld), current %d\n",
 			  engine->name, request->global_seqno,
 			  request->fence.context, request->fence.seqno,
 			  intel_engine_get_seqno(engine));
@@ -3308,7 +3308,7 @@ static void nop_submit_request(struct i915_request *request)
 {
 	unsigned long flags;
 
-	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
+	GEM_TRACE("%s fence %llx:%lld -> -EIO\n",
 		  request->engine->name,
 		  request->fence.context, request->fence.seqno);
 	dma_fence_set_error(&request->fence, -EIO);


@@ -649,7 +649,7 @@ last_request_on_engine(struct i915_timeline *timeline,
 	rq = i915_gem_active_raw(&timeline->last_request,
 				 &engine->i915->drm.struct_mutex);
 	if (rq && rq->engine == engine) {
-		GEM_TRACE("last request for %s on engine %s: %llx:%d\n",
+		GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
 			  timeline->name, engine->name,
 			  rq->fence.context, rq->fence.seqno);
 		GEM_BUG_ON(rq->timeline != timeline);
@@ -686,14 +686,14 @@ static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
 		 * switch-to-kernel-context?
 		 */
 		if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
-			GEM_TRACE("%s needs barrier for %llx:%d\n",
+			GEM_TRACE("%s needs barrier for %llx:%lld\n",
 				  ring->timeline->name,
 				  rq->fence.context,
 				  rq->fence.seqno);
 			return false;
 		}
 
-		GEM_TRACE("%s has barrier after %llx:%d\n",
+		GEM_TRACE("%s has barrier after %llx:%lld\n",
 			  ring->timeline->name,
 			  rq->fence.context,
 			  rq->fence.seqno);
@@ -749,7 +749,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
 		if (prev->gem_context == i915->kernel_context)
 			continue;
 
-		GEM_TRACE("add barrier on %s for %llx:%d\n",
+		GEM_TRACE("add barrier on %s for %llx:%lld\n",
 			  engine->name,
 			  prev->fence.context,
 			  prev->fence.seqno);


@@ -270,7 +270,7 @@ static void free_capture_list(struct i915_request *request)
 static void __retire_engine_request(struct intel_engine_cs *engine,
 				    struct i915_request *rq)
 {
-	GEM_TRACE("%s(%s) fence %llx:%d, global=%d, current %d\n",
+	GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d\n",
 		  __func__, engine->name,
 		  rq->fence.context, rq->fence.seqno,
 		  rq->global_seqno,
@@ -332,7 +332,7 @@ static void i915_request_retire(struct i915_request *request)
 {
 	struct i915_gem_active *active, *next;
 
-	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
+	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
 		  request->engine->name,
 		  request->fence.context, request->fence.seqno,
 		  request->global_seqno,
@@ -395,7 +395,7 @@ void i915_request_retire_upto(struct i915_request *rq)
 	struct intel_ring *ring = rq->ring;
 	struct i915_request *tmp;
 
-	GEM_TRACE("%s fence %llx:%d, global=%d, current %d\n",
+	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d\n",
 		  rq->engine->name,
 		  rq->fence.context, rq->fence.seqno,
 		  rq->global_seqno,
@@ -436,7 +436,7 @@ void __i915_request_submit(struct i915_request *request)
 	struct intel_engine_cs *engine = request->engine;
 	u32 seqno;
 
-	GEM_TRACE("%s fence %llx:%d -> global=%d, current %d\n",
+	GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
 		  engine->timeline.seqno + 1,
@@ -486,7 +486,7 @@ void __i915_request_unsubmit(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 
-	GEM_TRACE("%s fence %llx:%d <- global=%d, current %d\n",
+	GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d\n",
 		  engine->name,
 		  request->fence.context, request->fence.seqno,
 		  request->global_seqno,
@@ -961,7 +961,7 @@ void i915_request_add(struct i915_request *request)
 	struct i915_request *prev;
 	u32 *cs;
 
-	GEM_TRACE("%s fence %llx:%d\n",
+	GEM_TRACE("%s fence %llx:%lld\n",
 		  engine->name, request->fence.context, request->fence.seqno);
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);


@@ -390,7 +390,7 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
 	if (!fence)
 		return;
 
-	pr_notice("Asynchronous wait on fence %s:%s:%x timed out (hint:%pS)\n",
+	pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%pS)\n",
 		  cb->dma->ops->get_driver_name(cb->dma),
 		  cb->dma->ops->get_timeline_name(cb->dma),
 		  cb->dma->seqno,


@@ -517,20 +517,10 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	drm_connector_put(connector);
 }
 
-static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
-{
-	struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
-	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
-
-	drm_kms_helper_hotplug_event(dev);
-}
-
 static const struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = intel_dp_add_mst_connector,
 	.register_connector = intel_dp_register_mst_connector,
 	.destroy_connector = intel_dp_destroy_mst_connector,
-	.hotplug = intel_dp_mst_hotplug,
 };
 
 static struct intel_dp_mst_encoder *


@@ -1201,7 +1201,7 @@ static void print_request(struct drm_printer *m,
 
 	x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
 
-	drm_printf(m, "%s%x%s [%llx:%x]%s @ %dms: %s\n",
+	drm_printf(m, "%s%x%s [%llx:%llx]%s @ %dms: %s\n",
 		   prefix,
 		   rq->global_seqno,
 		   i915_request_completed(rq) ? "!" : "",


@@ -455,7 +455,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 			desc = execlists_update_context(rq);
 			GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
-			GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
+			GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
 				  engine->name, n,
 				  port[n].context_id, count,
 				  rq->global_seqno,
@@ -748,7 +748,7 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 	while (num_ports-- && port_isset(port)) {
 		struct i915_request *rq = port_request(port);
 
-		GEM_TRACE("%s:port%u global=%d (fence %llx:%d), (current %d)\n",
+		GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d)\n",
 			  rq->engine->name,
 			  (unsigned int)(port - execlists->port),
 			  rq->global_seqno,
@@ -966,7 +966,7 @@ static void process_csb(struct intel_engine_cs *engine)
 						EXECLISTS_ACTIVE_USER));
 
 		rq = port_unpack(port, &count);
-		GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%d) (current %d), prio=%d\n",
+		GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d), prio=%d\n",
 			  engine->name,
 			  port->context_id, count,
 			  rq ? rq->global_seqno : 0,


@@ -451,7 +451,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 			if (!wait_until_running(&h, rq)) {
 				struct drm_printer p = drm_info_printer(i915->drm.dev);
 
-				pr_err("%s: Failed to start request %x, at %x\n",
+				pr_err("%s: Failed to start request %llx, at %x\n",
 				       __func__, rq->fence.seqno, hws_seqno(&h, rq));
 				intel_engine_dump(engine, &p,
 						  "%s\n", engine->name);
@@ -552,7 +552,7 @@ static int active_request_put(struct i915_request *rq)
 		return 0;
 
 	if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
-		GEM_TRACE("%s timed out waiting for completion of fence %llx:%d, seqno %d.\n",
+		GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n",
 			  rq->engine->name,
 			  rq->fence.context,
 			  rq->fence.seqno,
@@ -729,7 +729,7 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
 				if (!wait_until_running(&h, rq)) {
 					struct drm_printer p = drm_info_printer(i915->drm.dev);
 
-					pr_err("%s: Failed to start request %x, at %x\n",
+					pr_err("%s: Failed to start request %llx, at %x\n",
 					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
 					intel_engine_dump(engine, &p,
 							  "%s\n", engine->name);
@@ -928,7 +928,7 @@ static int igt_reset_wait(void *arg)
 	if (!wait_until_running(&h, rq)) {
 		struct drm_printer p = drm_info_printer(i915->drm.dev);
 
-		pr_err("%s: Failed to start request %x, at %x\n",
+		pr_err("%s: Failed to start request %llx, at %x\n",
 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
 
@@ -1107,7 +1107,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
 	if (!wait_until_running(&h, rq)) {
 		struct drm_printer p = drm_info_printer(i915->drm.dev);
 
-		pr_err("%s: Failed to start request %x, at %x\n",
+		pr_err("%s: Failed to start request %llx, at %x\n",
 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
 
@@ -1302,7 +1302,7 @@ static int igt_reset_queue(void *arg)
 			if (!wait_until_running(&h, prev)) {
 				struct drm_printer p = drm_info_printer(i915->drm.dev);
 
-				pr_err("%s(%s): Failed to start request %x, at %x\n",
+				pr_err("%s(%s): Failed to start request %llx, at %x\n",
 				       __func__, engine->name,
 				       prev->fence.seqno, hws_seqno(&h, prev));
 				intel_engine_dump(engine, &p,
@@ -1413,7 +1413,7 @@ static int igt_handle_error(void *arg)
 	if (!wait_until_running(&h, rq)) {
 		struct drm_printer p = drm_info_printer(i915->drm.dev);
 
-		pr_err("%s: Failed to start request %x, at %x\n",
+		pr_err("%s: Failed to start request %llx, at %x\n",
 		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);


@@ -144,7 +144,7 @@ static int mdp5_global_obj_init(struct mdp5_kms *mdp5_kms)

 	state->mdp5_kms = mdp5_kms;

-	drm_atomic_private_obj_init(&mdp5_kms->glob_state,
+	drm_atomic_private_obj_init(mdp5_kms->dev, &mdp5_kms->glob_state,
 				    &state->base,
 				    &mdp5_global_state_funcs);
 	return 0;


@@ -1062,13 +1062,6 @@ nv50_mstm_prepare(struct nv50_mstm *mstm)
 	}
 }

-static void
-nv50_mstm_hotplug(struct drm_dp_mst_topology_mgr *mgr)
-{
-	struct nv50_mstm *mstm = nv50_mstm(mgr);
-
-	drm_kms_helper_hotplug_event(mstm->outp->base.base.dev);
-}
-
 static void
 nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
 			    struct drm_connector *connector)
@@ -1120,7 +1113,6 @@ nv50_mstm = {
 	.add_connector = nv50_mstm_add_connector,
 	.register_connector = nv50_mstm_register_connector,
 	.destroy_connector = nv50_mstm_destroy_connector,
-	.hotplug = nv50_mstm_hotplug,
 };

 void


@@ -1010,7 +1010,6 @@ static void qxl_conn_destroy(struct drm_connector *connector)
 }

 static const struct drm_connector_funcs qxl_connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
 	.detect = qxl_conn_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = qxl_conn_destroy,


@@ -560,11 +560,12 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
 		dev_priv->gart_info.addr = NULL;
 		dev_priv->gart_info.bus_addr = 0;
 		dev_priv->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
-		if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
+		rc = drm_ati_pcigart_init(dev, &dev_priv->gart_info);
+		if (rc) {
 			DRM_ERROR("failed to init PCI GART!\n");
 			dev->dev_private = (void *)dev_priv;
 			r128_do_cleanup_cce(dev);
-			return -ENOMEM;
+			return rc;
 		}
 	R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
 #if IS_ENABLED(CONFIG_AGP)


@@ -320,19 +320,10 @@ static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	DRM_DEBUG_KMS("\n");
 }

-static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
-{
-	struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
-	struct drm_device *dev = master->base.dev;
-
-	drm_kms_helper_hotplug_event(dev);
-}
-
 static const struct drm_dp_mst_topology_cbs mst_cbs = {
 	.add_connector = radeon_dp_add_mst_connector,
 	.register_connector = radeon_dp_register_mst_connector,
 	.destroy_connector = radeon_dp_destroy_mst_connector,
-	.hotplug = radeon_dp_mst_hotplug,
 };

 static struct


@@ -18,6 +18,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_flip_work.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_plane_helper.h>
 #ifdef CONFIG_DRM_ANALOGIX_DP
 #include <drm/bridge/analogix_dp.h>
@@ -823,6 +824,7 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = {
 	.atomic_check = vop_plane_atomic_check,
 	.atomic_update = vop_plane_atomic_update,
 	.atomic_disable = vop_plane_atomic_disable,
+	.prepare_fb = drm_gem_fb_prepare_fb,
 };

 static const struct drm_plane_funcs vop_plane_funcs = {


@@ -716,7 +716,7 @@ static int tegra_display_hub_init(struct host1x_client *client)
 	if (!state)
 		return -ENOMEM;

-	drm_atomic_private_obj_init(&hub->base, &state->base,
+	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
 				    &tegra_display_hub_state_funcs);

 	tegra->hub = hub;


@@ -308,7 +308,6 @@ void v3d_exec_put(struct v3d_exec_info *exec);
 void v3d_tfu_job_put(struct v3d_tfu_job *exec);
 void v3d_reset(struct v3d_dev *v3d);
 void v3d_invalidate_caches(struct v3d_dev *v3d);
-void v3d_flush_caches(struct v3d_dev *v3d);

 /* v3d_irq.c */
 void v3d_irq_init(struct v3d_dev *v3d);


@@ -130,38 +130,31 @@ v3d_flush_l3(struct v3d_dev *v3d)
 	}
 }

-/* Invalidates the (read-only) L2 cache. */
+/* Invalidates the (read-only) L2C cache.  This was the L2 cache for
+ * uniforms and instructions on V3D 3.2.
+ */
 static void
-v3d_invalidate_l2(struct v3d_dev *v3d, int core)
+v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
 {
+	if (v3d->ver > 32)
+		return;
+
 	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
 		       V3D_L2CACTL_L2CCLR |
 		       V3D_L2CACTL_L2CENA);
 }

-static void
-v3d_invalidate_l1td(struct v3d_dev *v3d, int core)
-{
-	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
-	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
-		       V3D_L2TCACTL_L2TFLS), 100)) {
-		DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
-	}
-}
-
 /* Invalidates texture L2 cachelines */
 static void
 v3d_flush_l2t(struct v3d_dev *v3d, int core)
 {
-	v3d_invalidate_l1td(v3d, core);
+	/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
+	 * need to wait for completion before dispatching the job --
+	 * L2T accesses will be stalled until the flush has completed.
+	 */
 	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
 		       V3D_L2TCACTL_L2TFLS |
 		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
-	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
-		       V3D_L2TCACTL_L2TFLS), 100)) {
-		DRM_ERROR("Timeout waiting for L2T flush\n");
-	}
 }

 /* Invalidates the slice caches. These are read-only caches. */
@@ -175,35 +168,18 @@ v3d_invalidate_slices(struct v3d_dev *v3d, int core)
 		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
 }

-/* Invalidates texture L2 cachelines */
-static void
-v3d_invalidate_l2t(struct v3d_dev *v3d, int core)
-{
-	V3D_CORE_WRITE(core,
-		       V3D_CTL_L2TCACTL,
-		       V3D_L2TCACTL_L2TFLS |
-		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAR, V3D_L2TCACTL_FLM));
-	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
-		       V3D_L2TCACTL_L2TFLS), 100)) {
-		DRM_ERROR("Timeout waiting for L2T invalidate\n");
-	}
-}
-
 void
 v3d_invalidate_caches(struct v3d_dev *v3d)
 {
+	/* Invalidate the caches from the outside in.  That way if
+	 * another CL's concurrent use of nearby memory were to pull
+	 * an invalidated cacheline back in, we wouldn't leave stale
+	 * data in the inner cache.
+	 */
 	v3d_flush_l3(v3d);
-	v3d_invalidate_l2(v3d, 0);
-	v3d_invalidate_slices(v3d, 0);
+	v3d_invalidate_l2c(v3d, 0);
 	v3d_flush_l2t(v3d, 0);
+	v3d_invalidate_slices(v3d, 0);
 }
-
-void
-v3d_flush_caches(struct v3d_dev *v3d)
-{
-	v3d_invalidate_l1td(v3d, 0);
-	v3d_invalidate_l2t(v3d, 0);
-}

 static void


@@ -49,6 +49,13 @@ struct vc4_crtc_state {
 	struct drm_mm_node mm;
 	bool feed_txp;
 	bool txp_armed;
+
+	struct {
+		unsigned int left;
+		unsigned int right;
+		unsigned int top;
+		unsigned int bottom;
+	} margins;
 };

 static inline struct vc4_crtc_state *
@@ -624,6 +631,37 @@ static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc,
 	return MODE_OK;
 }

+void vc4_crtc_get_margins(struct drm_crtc_state *state,
+			  unsigned int *left, unsigned int *right,
+			  unsigned int *top, unsigned int *bottom)
+{
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+	struct drm_connector_state *conn_state;
+	struct drm_connector *conn;
+	int i;
+
+	*left = vc4_state->margins.left;
+	*right = vc4_state->margins.right;
+	*top = vc4_state->margins.top;
+	*bottom = vc4_state->margins.bottom;
+
+	/* We have to iterate over all new connector states because
+	 * vc4_crtc_get_margins() might be called before
+	 * vc4_crtc_atomic_check() which means margins info in vc4_crtc_state
+	 * might be outdated.
+	 */
+	for_each_new_connector_in_state(state->state, conn, conn_state, i) {
+		if (conn_state->crtc != state->crtc)
+			continue;
+
+		*left = conn_state->tv.margins.left;
+		*right = conn_state->tv.margins.right;
+		*top = conn_state->tv.margins.top;
+		*bottom = conn_state->tv.margins.bottom;
+		break;
+	}
+}
+
 static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 				 struct drm_crtc_state *state)
 {
@@ -671,6 +709,10 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
 			vc4_state->feed_txp = false;
 		}

+		vc4_state->margins.left = conn_state->tv.margins.left;
+		vc4_state->margins.right = conn_state->tv.margins.right;
+		vc4_state->margins.top = conn_state->tv.margins.top;
+		vc4_state->margins.bottom = conn_state->tv.margins.bottom;
 		break;
 	}

@@ -972,6 +1014,7 @@ static struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc)

 	old_vc4_state = to_vc4_crtc_state(crtc->state);
 	vc4_state->feed_txp = old_vc4_state->feed_txp;
+	vc4_state->margins = old_vc4_state->margins;

 	__drm_atomic_helper_crtc_duplicate_state(crtc, &vc4_state->base);
 	return &vc4_state->base;


@@ -707,6 +707,9 @@ bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
 			     const struct drm_display_mode *mode);
 void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
 void vc4_crtc_txp_armed(struct drm_crtc_state *state);
+void vc4_crtc_get_margins(struct drm_crtc_state *state,
+			  unsigned int *right, unsigned int *left,
+			  unsigned int *top, unsigned int *bottom);

 /* vc4_debugfs.c */
 int vc4_debugfs_init(struct drm_minor *minor);


@@ -310,6 +310,7 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
 {
 	struct drm_connector *connector;
 	struct vc4_hdmi_connector *hdmi_connector;
+	int ret;

 	hdmi_connector = devm_kzalloc(dev->dev, sizeof(*hdmi_connector),
 				      GFP_KERNEL);
@@ -323,6 +324,13 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
 			   DRM_MODE_CONNECTOR_HDMIA);
 	drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs);

+	/* Create and attach TV margin props to this connector. */
+	ret = drm_mode_create_tv_margin_properties(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	drm_connector_attach_tv_margin_properties(connector);
+
 	connector->polled = (DRM_CONNECTOR_POLL_CONNECT |
 			     DRM_CONNECTOR_POLL_DISCONNECT);
@@ -408,6 +416,9 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder,
 static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
 {
 	struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder);
+	struct vc4_dev *vc4 = encoder->dev->dev_private;
+	struct vc4_hdmi *hdmi = vc4->hdmi;
+	struct drm_connector_state *cstate = hdmi->connector->state;
 	struct drm_crtc *crtc = encoder->crtc;
 	const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
 	union hdmi_infoframe frame;
@@ -426,6 +437,11 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
 					   vc4_encoder->rgb_range_selectable,
 					   false);

+	frame.avi.right_bar = cstate->tv.margins.right;
+	frame.avi.left_bar = cstate->tv.margins.left;
+	frame.avi.top_bar = cstate->tv.margins.top;
+	frame.avi.bottom_bar = cstate->tv.margins.bottom;
+
 	vc4_hdmi_write_infoframe(encoder, &frame);
 }


@@ -432,7 +432,8 @@ int vc4_kms_load(struct drm_device *dev)
 	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
 	if (!ctm_state)
 		return -ENOMEM;
-	drm_atomic_private_obj_init(&vc4->ctm_manager, &ctm_state->base,
+
+	drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
 				    &vc4_ctm_state_funcs);

 	drm_mode_config_reset(dev);


@@ -258,6 +258,52 @@ static u32 vc4_get_scl_field(struct drm_plane_state *state, int plane)
 	}
 }

+static int vc4_plane_margins_adj(struct drm_plane_state *pstate)
+{
+	struct vc4_plane_state *vc4_pstate = to_vc4_plane_state(pstate);
+	unsigned int left, right, top, bottom, adjhdisplay, adjvdisplay;
+	struct drm_crtc_state *crtc_state;
+
+	crtc_state = drm_atomic_get_new_crtc_state(pstate->state,
+						   pstate->crtc);
+
+	vc4_crtc_get_margins(crtc_state, &left, &right, &top, &bottom);
+	if (!left && !right && !top && !bottom)
+		return 0;
+
+	if (left + right >= crtc_state->mode.hdisplay ||
+	    top + bottom >= crtc_state->mode.vdisplay)
+		return -EINVAL;
+
+	adjhdisplay = crtc_state->mode.hdisplay - (left + right);
+	vc4_pstate->crtc_x = DIV_ROUND_CLOSEST(vc4_pstate->crtc_x *
+					       adjhdisplay,
+					       crtc_state->mode.hdisplay);
+	vc4_pstate->crtc_x += left;
+	if (vc4_pstate->crtc_x > crtc_state->mode.hdisplay - left)
+		vc4_pstate->crtc_x = crtc_state->mode.hdisplay - left;
+
+	adjvdisplay = crtc_state->mode.vdisplay - (top + bottom);
+	vc4_pstate->crtc_y = DIV_ROUND_CLOSEST(vc4_pstate->crtc_y *
+					       adjvdisplay,
+					       crtc_state->mode.vdisplay);
+	vc4_pstate->crtc_y += top;
+	if (vc4_pstate->crtc_y > crtc_state->mode.vdisplay - top)
+		vc4_pstate->crtc_y = crtc_state->mode.vdisplay - top;
+
+	vc4_pstate->crtc_w = DIV_ROUND_CLOSEST(vc4_pstate->crtc_w *
+					       adjhdisplay,
+					       crtc_state->mode.hdisplay);
+	vc4_pstate->crtc_h = DIV_ROUND_CLOSEST(vc4_pstate->crtc_h *
+					       adjvdisplay,
+					       crtc_state->mode.vdisplay);
+
+	if (!vc4_pstate->crtc_w || !vc4_pstate->crtc_h)
+		return -EINVAL;
+
+	return 0;
+}
+
 static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 {
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
@@ -306,6 +352,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 	vc4_state->crtc_w = state->dst.x2 - state->dst.x1;
 	vc4_state->crtc_h = state->dst.y2 - state->dst.y1;

+	ret = vc4_plane_margins_adj(state);
+	if (ret)
+		return ret;
+
 	vc4_state->x_scaling[0] = vc4_get_scaling_mode(vc4_state->src_w[0],
 						       vc4_state->crtc_w);
 	vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
@@ -492,8 +542,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	bool mix_plane_alpha;
 	bool covers_screen;
 	u32 scl0, scl1, pitch0;
-	u32 tiling;
+	u32 tiling, src_y;
 	u32 hvs_format = format->hvs;
+	unsigned int rotation;
 	int ret, i;

 	if (vc4_state->dlist_initialized)
@@ -520,6 +571,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	h_subsample = drm_format_horz_chroma_subsampling(format->drm);
 	v_subsample = drm_format_vert_chroma_subsampling(format->drm);

+	rotation = drm_rotation_simplify(state->rotation,
+					 DRM_MODE_ROTATE_0 |
+					 DRM_MODE_REFLECT_X |
+					 DRM_MODE_REFLECT_Y);
+
+	/* We must point to the last line when Y reflection is enabled. */
+	src_y = vc4_state->src_y;
+	if (rotation & DRM_MODE_REFLECT_Y)
+		src_y += vc4_state->src_h[0] - 1;
+
 	switch (base_format_mod) {
 	case DRM_FORMAT_MOD_LINEAR:
 		tiling = SCALER_CTL0_TILING_LINEAR;
@@ -529,9 +590,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 		 * out.
 		 */
 		for (i = 0; i < num_planes; i++) {
-			vc4_state->offsets[i] += vc4_state->src_y /
+			vc4_state->offsets[i] += src_y /
 						 (i ? v_subsample : 1) *
 						 fb->pitches[i];
+
 			vc4_state->offsets[i] += vc4_state->src_x /
 						 (i ? h_subsample : 1) *
 						 fb->format->cpp[i];
@@ -557,22 +619,38 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 		u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift);
 		u32 tiles_l = vc4_state->src_x >> tile_w_shift;
 		u32 tiles_r = tiles_w - tiles_l;
-		u32 tiles_t = vc4_state->src_y >> tile_h_shift;
+		u32 tiles_t = src_y >> tile_h_shift;
 		/* Intra-tile offsets, which modify the base address (the
 		 * SCALER_PITCH0_TILE_Y_OFFSET tells HVS how to walk from that
 		 * base address).
 		 */
-		u32 tile_y = (vc4_state->src_y >> 4) & 1;
-		u32 subtile_y = (vc4_state->src_y >> 2) & 3;
-		u32 utile_y = vc4_state->src_y & 3;
+		u32 tile_y = (src_y >> 4) & 1;
+		u32 subtile_y = (src_y >> 2) & 3;
+		u32 utile_y = src_y & 3;
 		u32 x_off = vc4_state->src_x & tile_w_mask;
-		u32 y_off = vc4_state->src_y & tile_h_mask;
+		u32 y_off = src_y & tile_h_mask;
+
+		/* When Y reflection is requested we must set the
+		 * SCALER_PITCH0_TILE_LINE_DIR flag to tell HVS that all lines
+		 * after the initial one should be fetched in descending order,
+		 * which makes sense since we start from the last line and go
+		 * backward.
+		 * Don't know why we need y_off = max_y_off - y_off, but it's
+		 * definitely required (I guess it's also related to the "going
+		 * backward" situation).
+		 */
+		if (rotation & DRM_MODE_REFLECT_Y) {
+			y_off = tile_h_mask - y_off;
+			pitch0 = SCALER_PITCH0_TILE_LINE_DIR;
+		} else {
+			pitch0 = 0;
+		}

 		tiling = SCALER_CTL0_TILING_256B_OR_T;
-		pitch0 = (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) |
+		pitch0 |= (VC4_SET_FIELD(x_off, SCALER_PITCH0_SINK_PIX) |
 			   VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) |
 			   VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) |
 			   VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R));
 		vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift);
 		vc4_state->offsets[0] += subtile_y << 8;
 		vc4_state->offsets[0] += utile_y << 4;
@@ -595,6 +673,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	case DRM_FORMAT_MOD_BROADCOM_SAND128:
 	case DRM_FORMAT_MOD_BROADCOM_SAND256: {
 		uint32_t param = fourcc_mod_broadcom_param(fb->modifier);
+		u32 tile_w, tile, x_off, pix_per_tile;

 		/* Column-based NV12 or RGBA.
 		 */
@@ -614,12 +693,15 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 		switch (base_format_mod) {
 		case DRM_FORMAT_MOD_BROADCOM_SAND64:
 			tiling = SCALER_CTL0_TILING_64B;
+			tile_w = 64;
 			break;
 		case DRM_FORMAT_MOD_BROADCOM_SAND128:
 			tiling = SCALER_CTL0_TILING_128B;
+			tile_w = 128;
 			break;
 		case DRM_FORMAT_MOD_BROADCOM_SAND256:
 			tiling = SCALER_CTL0_TILING_256B_OR_T;
+			tile_w = 256;
 			break;
 		default:
 			break;
@@ -630,6 +712,23 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 			return -EINVAL;
 		}

+		pix_per_tile = tile_w / fb->format->cpp[0];
+		tile = vc4_state->src_x / pix_per_tile;
+		x_off = vc4_state->src_x % pix_per_tile;
+
+		/* Adjust the base pointer to the first pixel to be scanned
+		 * out.
+		 */
+		for (i = 0; i < num_planes; i++) {
+			vc4_state->offsets[i] += param * tile_w * tile;
+			vc4_state->offsets[i] += src_y /
+						 (i ? v_subsample : 1) *
+						 tile_w;
+			vc4_state->offsets[i] += x_off /
+						 (i ? h_subsample : 1) *
+						 fb->format->cpp[i];
+		}
+
 		pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT);
 		break;
 	}
@@ -643,6 +742,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
 	/* Control word */
 	vc4_dlist_write(vc4_state,
 			SCALER_CTL0_VALID |
+			(rotation & DRM_MODE_REFLECT_X ? SCALER_CTL0_HFLIP : 0) |
+			(rotation & DRM_MODE_REFLECT_Y ? SCALER_CTL0_VFLIP : 0) |
 			VC4_SET_FIELD(SCALER_CTL0_RGBA_EXPAND_ROUND, SCALER_CTL0_RGBA_EXPAND) |
 			(format->pixel_order << SCALER_CTL0_ORDER_SHIFT) |
 			(hvs_format << SCALER_CTL0_PIXEL_FORMAT_SHIFT) |
@@ -1123,6 +1224,11 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
 	drm_plane_helper_add(plane, &vc4_plane_helper_funcs);

 	drm_plane_create_alpha_property(plane);
+	drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
+					   DRM_MODE_ROTATE_0 |
+					   DRM_MODE_ROTATE_180 |
+					   DRM_MODE_REFLECT_X |
+					   DRM_MODE_REFLECT_Y);

 	return plane;
 }
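
To make the margin math in vc4_plane_margins_adj() above concrete, here is a small standalone sketch (plain userspace C, not part of the patch; the 1920x1080 mode and 32-pixel margins are made-up example values):

#include <stdio.h>

/* Round-to-nearest division for the positive values used here,
 * mirroring what the kernel's DIV_ROUND_CLOSEST() computes. */
static unsigned int div_round_closest(unsigned int n, unsigned int d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	unsigned int hdisplay = 1920, left = 32, right = 32;
	unsigned int crtc_x = 960, crtc_w = 960;	/* right half of the mode */
	unsigned int adjhdisplay = hdisplay - (left + right);	/* 1856 */

	/* Rescale the plane into the margin-reduced active area, then
	 * shift it right by the left margin. */
	crtc_x = div_round_closest(crtc_x * adjhdisplay, hdisplay) + left;
	crtc_w = div_round_closest(crtc_w * adjhdisplay, hdisplay);

	/* Prints "x=960 w=928": the plane now ends at 1888 = 1920 - 32,
	 * flush against the right margin. */
	printf("x=%u w=%u\n", crtc_x, crtc_w);
	return 0;
}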


@@ -53,13 +53,13 @@ static void vgem_fence_release(struct dma_fence *base)

 static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size)
 {
-	snprintf(str, size, "%u", fence->seqno);
+	snprintf(str, size, "%llu", fence->seqno);
 }

 static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
 					  int size)
 {
-	snprintf(str, size, "%u",
+	snprintf(str, size, "%llu",
 		 dma_fence_is_signaled(fence) ? fence->seqno : 0);
 }


@@ -390,6 +390,5 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
 	for (i = 0 ; i < vgdev->num_scanouts; ++i)
 		kfree(vgdev->outputs[i].edid);
-	virtio_gpu_fbdev_fini(vgdev);
 	drm_mode_config_cleanup(vgdev->ddev);
 }


@@ -42,13 +42,20 @@ module_param_named(modeset, virtio_gpu_modeset, int, 0400);

 static int virtio_gpu_probe(struct virtio_device *vdev)
 {
+	int ret;
+
 	if (vgacon_text_force() && virtio_gpu_modeset == -1)
 		return -EINVAL;

 	if (virtio_gpu_modeset == 0)
 		return -EINVAL;

-	return drm_virtio_init(&driver, vdev);
+	ret = drm_virtio_init(&driver, vdev);
+	if (ret)
+		return ret;
+
+	drm_fbdev_generic_setup(vdev->priv, 32);
+	return 0;
 }

 static void virtio_gpu_remove(struct virtio_device *vdev)
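
For drivers making the same conversion, the probe-time pattern above reduces to "register the device, then hand the console over to the generic helper". A minimal sketch (hypothetical foo_probe_tail(); assumes a fully initialized &drm_device):

#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>

static int foo_probe_tail(struct drm_device *drm)
{
	int ret;

	/* The device must be registered before the generic fbdev
	 * emulation is set up on top of the driver's KMS paths. */
	ret = drm_dev_register(drm, 0);
	if (ret)
		return ret;

	/* 32 is the preferred bpp for the emulated framebuffer, as in
	 * the virtio conversion above. */
	drm_fbdev_generic_setup(drm, 32);
	return 0;
}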


@@ -137,19 +137,10 @@ struct virtio_gpu_framebuffer {
 #define to_virtio_gpu_framebuffer(x) \
 	container_of(x, struct virtio_gpu_framebuffer, base)

-struct virtio_gpu_fbdev {
-	struct drm_fb_helper helper;
-	struct virtio_gpu_framebuffer vgfb;
-	struct virtio_gpu_device *vgdev;
-	struct delayed_work work;
-};
-
 struct virtio_gpu_mman {
 	struct ttm_bo_device bdev;
 };

-struct virtio_gpu_fbdev;
-
 struct virtio_gpu_queue {
 	struct virtqueue *vq;
 	spinlock_t qlock;
@@ -180,8 +171,6 @@ struct virtio_gpu_device {

 	struct virtio_gpu_mman mman;

-	/* pointer to fbdev info structure */
-	struct virtio_gpu_fbdev *vgfbdev;
 	struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS];
 	uint32_t num_scanouts;

@@ -249,9 +238,6 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
 			      uint32_t handle, uint64_t *offset_p);

 /* virtio_fb */
-#define VIRTIO_GPUFB_CONN_LIMIT 1
-int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev);
 int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *qfb,
 			     struct drm_clip_rect *clips,
 			     unsigned int num_clips);


@@ -27,8 +27,6 @@
 #include <drm/drm_fb_helper.h>
 #include "virtgpu_drv.h"

-#define VIRTIO_GPU_FBCON_POLL_PERIOD (HZ / 60)
-
 static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb,
 				   bool store, int x, int y,
 				   int width, int height)
@@ -150,192 +148,3 @@ int virtio_gpu_surface_dirty(struct virtio_gpu_framebuffer *vgfb,
 			     left, top, right - left, bottom - top);
 	return 0;
 }
-
-static void virtio_gpu_fb_dirty_work(struct work_struct *work)
-{
-	struct delayed_work *delayed_work = to_delayed_work(work);
-	struct virtio_gpu_fbdev *vfbdev =
-		container_of(delayed_work, struct virtio_gpu_fbdev, work);
-	struct virtio_gpu_framebuffer *vgfb = &vfbdev->vgfb;
-
-	virtio_gpu_dirty_update(&vfbdev->vgfb, false, vgfb->x1, vgfb->y1,
-				vgfb->x2 - vgfb->x1, vgfb->y2 - vgfb->y1);
-}
-
-static void virtio_gpu_3d_fillrect(struct fb_info *info,
-				   const struct fb_fillrect *rect)
-{
-	struct virtio_gpu_fbdev *vfbdev = info->par;
-
-	drm_fb_helper_sys_fillrect(info, rect);
-	virtio_gpu_dirty_update(&vfbdev->vgfb, true, rect->dx, rect->dy,
-				rect->width, rect->height);
-	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
-}
-
-static void virtio_gpu_3d_copyarea(struct fb_info *info,
-				   const struct fb_copyarea *area)
-{
-	struct virtio_gpu_fbdev *vfbdev = info->par;
-
-	drm_fb_helper_sys_copyarea(info, area);
-	virtio_gpu_dirty_update(&vfbdev->vgfb, true, area->dx, area->dy,
-				area->width, area->height);
-	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
-}
-
-static void virtio_gpu_3d_imageblit(struct fb_info *info,
-				    const struct fb_image *image)
-{
-	struct virtio_gpu_fbdev *vfbdev = info->par;
-
-	drm_fb_helper_sys_imageblit(info, image);
-	virtio_gpu_dirty_update(&vfbdev->vgfb, true, image->dx, image->dy,
-				image->width, image->height);
-	schedule_delayed_work(&vfbdev->work, VIRTIO_GPU_FBCON_POLL_PERIOD);
-}
-
-static struct fb_ops virtio_gpufb_ops = {
-	.owner = THIS_MODULE,
-	DRM_FB_HELPER_DEFAULT_OPS,
-	.fb_fillrect = virtio_gpu_3d_fillrect,
-	.fb_copyarea = virtio_gpu_3d_copyarea,
-	.fb_imageblit = virtio_gpu_3d_imageblit,
-};
-
-static int virtio_gpufb_create(struct drm_fb_helper *helper,
-			       struct drm_fb_helper_surface_size *sizes)
-{
-	struct virtio_gpu_fbdev *vfbdev =
-		container_of(helper, struct virtio_gpu_fbdev, helper);
-	struct drm_device *dev = helper->dev;
-	struct virtio_gpu_device *vgdev = dev->dev_private;
-	struct fb_info *info;
-	struct drm_framebuffer *fb;
-	struct drm_mode_fb_cmd2 mode_cmd = {};
-	struct virtio_gpu_object *obj;
-	uint32_t format, size;
-	int ret;
-
-	mode_cmd.width = sizes->surface_width;
-	mode_cmd.height = sizes->surface_height;
-	mode_cmd.pitches[0] = mode_cmd.width * 4;
-	mode_cmd.pixel_format = DRM_FORMAT_HOST_XRGB8888;
-
-	format = virtio_gpu_translate_format(mode_cmd.pixel_format);
-	if (format == 0)
-		return -EINVAL;
-
-	size = mode_cmd.pitches[0] * mode_cmd.height;
-	obj = virtio_gpu_alloc_object(dev, size, false, true);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
-
-	virtio_gpu_cmd_create_resource(vgdev, obj, format,
-				       mode_cmd.width, mode_cmd.height);
-
-	ret = virtio_gpu_object_kmap(obj);
-	if (ret) {
-		DRM_ERROR("failed to kmap fb %d\n", ret);
-		goto err_obj_vmap;
-	}
-
-	/* attach the object to the resource */
-	ret = virtio_gpu_object_attach(vgdev, obj, NULL);
-	if (ret)
-		goto err_obj_attach;
-
-	info = drm_fb_helper_alloc_fbi(helper);
-	if (IS_ERR(info)) {
-		ret = PTR_ERR(info);
-		goto err_fb_alloc;
-	}
-
-	info->par = helper;
-
-	ret = virtio_gpu_framebuffer_init(dev, &vfbdev->vgfb,
-					  &mode_cmd, &obj->gem_base);
-	if (ret)
-		goto err_fb_alloc;
-
-	fb = &vfbdev->vgfb.base;
-
-	vfbdev->helper.fb = fb;
-
-	strcpy(info->fix.id, "virtiodrmfb");
-	info->fbops = &virtio_gpufb_ops;
-	info->pixmap.flags = FB_PIXMAP_SYSTEM;
-
-	info->screen_buffer = obj->vmap;
-	info->screen_size = obj->gem_base.size;
-	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
-	drm_fb_helper_fill_var(info, &vfbdev->helper,
-			       sizes->fb_width, sizes->fb_height);
-
-	info->fix.mmio_start = 0;
-	info->fix.mmio_len = 0;
-	return 0;
-
-err_fb_alloc:
-	virtio_gpu_object_detach(vgdev, obj);
-err_obj_attach:
-err_obj_vmap:
-	virtio_gpu_gem_free_object(&obj->gem_base);
-	return ret;
-}
-
-static int virtio_gpu_fbdev_destroy(struct drm_device *dev,
-				    struct virtio_gpu_fbdev *vgfbdev)
-{
-	struct virtio_gpu_framebuffer *vgfb = &vgfbdev->vgfb;
-
-	drm_fb_helper_unregister_fbi(&vgfbdev->helper);
-
-	if (vgfb->base.obj[0])
-		vgfb->base.obj[0] = NULL;
-	drm_fb_helper_fini(&vgfbdev->helper);
-	drm_framebuffer_cleanup(&vgfb->base);
-
-	return 0;
-}
-
-static const struct drm_fb_helper_funcs virtio_gpu_fb_helper_funcs = {
-	.fb_probe = virtio_gpufb_create,
-};
-
-int virtio_gpu_fbdev_init(struct virtio_gpu_device *vgdev)
-{
-	struct virtio_gpu_fbdev *vgfbdev;
-	int bpp_sel = 32; /* TODO: parameter from somewhere? */
-	int ret;
-
-	vgfbdev = kzalloc(sizeof(struct virtio_gpu_fbdev), GFP_KERNEL);
-	if (!vgfbdev)
-		return -ENOMEM;
-
-	vgfbdev->vgdev = vgdev;
-	vgdev->vgfbdev = vgfbdev;
-	INIT_DELAYED_WORK(&vgfbdev->work, virtio_gpu_fb_dirty_work);
-
-	drm_fb_helper_prepare(vgdev->ddev, &vgfbdev->helper,
-			      &virtio_gpu_fb_helper_funcs);
-	ret = drm_fb_helper_init(vgdev->ddev, &vgfbdev->helper,
-				 VIRTIO_GPUFB_CONN_LIMIT);
-	if (ret) {
-		kfree(vgfbdev);
-		return ret;
-	}
-
-	drm_fb_helper_single_add_all_connectors(&vgfbdev->helper);
-	drm_fb_helper_initial_config(&vgfbdev->helper, bpp_sel);
-	return 0;
-}
-
-void virtio_gpu_fbdev_fini(struct virtio_gpu_device *vgdev)
-{
-	if (!vgdev->vgfbdev)
-		return;
-
-	virtio_gpu_fbdev_destroy(vgdev->ddev, vgdev->vgfbdev);
-	kfree(vgdev->vgfbdev);
-	vgdev->vgfbdev = NULL;
-}


@@ -28,11 +28,6 @@
 #include <drm/drmP.h>
 #include "virtgpu_drv.h"

-static int virtio_gpu_fbdev = 1;
-
-MODULE_PARM_DESC(fbdev, "Disable/Enable framebuffer device & console");
-module_param_named(fbdev, virtio_gpu_fbdev, int, 0400);
-
 static void virtio_gpu_config_changed_work_func(struct work_struct *work)
 {
 	struct virtio_gpu_device *vgdev =
@@ -212,9 +207,6 @@ int virtio_gpu_driver_load(struct drm_device *dev, unsigned long flags)
 	virtio_gpu_cmd_get_display_info(vgdev);
 	wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
 			   5 * HZ);

-	if (virtio_gpu_fbdev)
-		virtio_gpu_fbdev_init(vgdev);
-
 	return 0;

 err_modeset:


@@ -95,6 +95,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
 	dev->mode_config.min_height = YRES_MIN;
 	dev->mode_config.max_width = XRES_MAX;
 	dev->mode_config.max_height = YRES_MAX;
+	dev->mode_config.preferred_depth = 24;

 	return vkms_output_init(vkmsdev);
 }
} }


@@ -89,7 +89,6 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
 };

 static const struct drm_connector_funcs connector_funcs = {
-	.dpms = drm_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,


@@ -94,8 +94,6 @@ struct dma_buf_attachment;
 struct pci_dev;
 struct pci_controller;

-#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-
 #define DRM_SWITCH_POWER_ON 0
 #define DRM_SWITCH_POWER_OFF 1
 #define DRM_SWITCH_POWER_CHANGING 2


@@ -228,8 +228,30 @@ struct drm_private_state_funcs {
 * Currently only tracks the state update functions and the opaque driver
 * private state itself, but in the future might also track which
 * &drm_modeset_lock is required to duplicate and update this object's state.
+ *
+ * All private objects must be initialized before the DRM device they are
+ * attached to is registered to the DRM subsystem (call to drm_dev_register())
+ * and should stay around until this DRM device is unregistered (call to
+ * drm_dev_unregister()). In other words, private objects lifetime is tied
+ * to the DRM device lifetime. This implies that:
+ *
+ * 1/ all calls to drm_atomic_private_obj_init() must be done before calling
+ *    drm_dev_register()
+ * 2/ all calls to drm_atomic_private_obj_fini() must be done after calling
+ *    drm_dev_unregister()
 */
struct drm_private_obj {
+	/**
+	 * @head: List entry used to attach a private object to a &drm_device
+	 * (queued to &drm_mode_config.privobj_list).
+	 */
+	struct list_head head;
+
+	/**
+	 * @lock: Modeset lock to protect the state object.
+	 */
+	struct drm_modeset_lock lock;
+
 	/**
 	 * @state: Current atomic state for this driver private object.
 	 */
@@ -244,6 +266,18 @@ struct drm_private_obj {
 	const struct drm_private_state_funcs *funcs;
 };

+/**
+ * drm_for_each_privobj() - private object iterator
+ *
+ * @privobj: pointer to the current private object. Updated after each
+ *	     iteration
+ * @dev: the DRM device we want to get private objects from
+ *
+ * Allows one to iterate over all private objects attached to @dev
+ */
+#define drm_for_each_privobj(privobj, dev) \
+	list_for_each_entry(privobj, &(dev)->mode_config.privobj_list, head)
+
 /**
 * struct drm_private_state - base struct for driver private object state
 * @state: backpointer to global drm_atomic_state
@@ -400,7 +434,8 @@ struct drm_connector_state * __must_check
 drm_atomic_get_connector_state(struct drm_atomic_state *state,
 			       struct drm_connector *connector);

-void drm_atomic_private_obj_init(struct drm_private_obj *obj,
+void drm_atomic_private_obj_init(struct drm_device *dev,
+				 struct drm_private_obj *obj,
 				 struct drm_private_state *state,
 				 const struct drm_private_state_funcs *funcs);
 void drm_atomic_private_obj_fini(struct drm_private_obj *obj);
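
A hedged sketch of the new init contract from a driver's point of view (hypothetical foo names; the funcs table is assumed to supply the usual duplicate/destroy hooks):

#include <linux/slab.h>
#include <drm/drm_atomic.h>

struct foo_private_state {
	struct drm_private_state base;
	unsigned int assigned_resources;	/* hypothetical driver state */
};

static const struct drm_private_state_funcs foo_private_state_funcs;	/* assumed */

static int foo_private_obj_init(struct drm_device *dev,
				struct drm_private_obj *obj)
{
	struct foo_private_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;

	/* Must run before drm_dev_register(): this links obj into
	 * dev->mode_config.privobj_list, which drm_for_each_privobj()
	 * walks. The matching drm_atomic_private_obj_fini() belongs
	 * after drm_dev_unregister(). */
	drm_atomic_private_obj_init(dev, obj, &state->base,
				    &foo_private_state_funcs);
	return 0;
}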


@@ -394,7 +394,7 @@ int drm_display_info_set_bus_formats(struct drm_display_info *info,
 /**
 * struct drm_tv_connector_state - TV connector related states
 * @subconnector: selected subconnector
- * @margins: margins
+ * @margins: margins (all margins are expressed in pixels)
 * @margins.left: left margin
 * @margins.right: right margin
 * @margins.top: top margin
@@ -1249,9 +1249,11 @@ const char *drm_get_tv_select_name(int val);
 const char *drm_get_content_protection_name(int val);

 int drm_mode_create_dvi_i_properties(struct drm_device *dev);
+int drm_mode_create_tv_margin_properties(struct drm_device *dev);
 int drm_mode_create_tv_properties(struct drm_device *dev,
 				  unsigned int num_modes,
 				  const char * const modes[]);
+void drm_connector_attach_tv_margin_properties(struct drm_connector *conn);
 int drm_mode_create_scaling_mode_property(struct drm_device *dev);
 int drm_connector_attach_content_type_property(struct drm_connector *dev);
 int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
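
The intended call sequence for the two new TV margin helpers mirrors the vc4 HDMI hunk earlier in this series: create the four properties once for the device, then attach them to each connector that wants them. A minimal sketch (hypothetical connector-init path):

#include <drm/drm_connector.h>

static int foo_connector_add_margins(struct drm_device *dev,
				     struct drm_connector *connector)
{
	int ret;

	/* Create the left/right/top/bottom margin properties. */
	ret = drm_mode_create_tv_margin_properties(dev);
	if (ret)
		return ret;

	/* Attach them to this connector; the values show up in
	 * drm_connector_state.tv.margins at atomic check time. */
	drm_connector_attach_tv_margin_properties(connector);
	return 0;
}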


@@ -387,8 +387,6 @@ struct drm_dp_mst_topology_cbs {
 	void (*register_connector)(struct drm_connector *connector);
 	void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
 				  struct drm_connector *connector);
-	void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
-
 };

 #define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)


@@ -32,6 +32,7 @@

 #include <linux/types.h>
 #include <linux/completion.h>
+#include <linux/idr.h>

 #include <uapi/drm/drm.h>


@@ -9,6 +9,8 @@
 #ifndef _DRM_HDCP_H_INCLUDED_
 #define _DRM_HDCP_H_INCLUDED_

+#include <linux/types.h>
+
 /* Period of hdcp checks (to ensure we're still authenticated) */
 #define DRM_HDCP_CHECK_PERIOD_MS (128 * 16)


@@ -2,6 +2,9 @@
 #define __DRM_DRM_LEGACY_H__

 #include <drm/drm_auth.h>
+#include <drm/drm_hashtab.h>
+
+struct drm_device;

 /*
 * Legacy driver interfaces for the Direct Rendering Manager
@@ -156,6 +159,7 @@ struct drm_map_list {
 int drm_legacy_addmap(struct drm_device *d, resource_size_t offset,
 		      unsigned int size, enum drm_map_type type,
 		      enum drm_map_flags flags, struct drm_local_map **map_p);
+struct drm_local_map *drm_legacy_findmap(struct drm_device *dev, unsigned int token);
 void drm_legacy_rmmap(struct drm_device *d, struct drm_local_map *map);
 int drm_legacy_rmmap_locked(struct drm_device *d, struct drm_local_map *map);
 void drm_legacy_master_rmmaps(struct drm_device *dev,
@@ -194,14 +198,4 @@ void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
 void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
 void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev);

-static inline struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
-						       unsigned int token)
-{
-	struct drm_map_list *_entry;
-
-	list_for_each_entry(_entry, &dev->maplist, head)
-		if (_entry->user_token == token)
-			return _entry->map;
-	return NULL;
-}
-
 #endif /* __DRM_DRM_LEGACY_H__ */


@@ -391,18 +391,18 @@ struct drm_mode_config {
 	/**
 	 * @idr_mutex:
 	 *
-	 * Mutex for KMS ID allocation and management. Protects both @crtc_idr
+	 * Mutex for KMS ID allocation and management. Protects both @object_idr
 	 * and @tile_idr.
 	 */
 	struct mutex idr_mutex;

 	/**
-	 * @crtc_idr:
+	 * @object_idr:
 	 *
 	 * Main KMS ID tracking object. Use this idr for all IDs, fb, crtc,
 	 * connector, modes - just makes life easier to have only one.
 	 */
-	struct idr crtc_idr;
+	struct idr object_idr;

 	/**
 	 * @tile_idr:
@@ -512,6 +512,15 @@ struct drm_mode_config {
 	 */
 	struct list_head property_list;

+	/**
+	 * @privobj_list:
+	 *
+	 * List of private objects linked with &drm_private_obj.head. This is
+	 * invariant over the lifetime of a device and hence doesn't need any
+	 * locks.
+	 */
+	struct list_head privobj_list;
+
 	int min_width, min_height;
 	int max_width, max_height;
 	const struct drm_mode_config_funcs *funcs;
@@ -688,22 +697,22 @@ struct drm_mode_config {
 	struct drm_property *tv_mode_property;
 	/**
 	 * @tv_left_margin_property: Optional TV property to set the left
-	 * margin.
+	 * margin (expressed in pixels).
 	 */
 	struct drm_property *tv_left_margin_property;
 	/**
 	 * @tv_right_margin_property: Optional TV property to set the right
-	 * margin.
+	 * margin (expressed in pixels).
 	 */
 	struct drm_property *tv_right_margin_property;
 	/**
 	 * @tv_top_margin_property: Optional TV property to set the right
-	 * margin.
+	 * margin (expressed in pixels).
 	 */
 	struct drm_property *tv_top_margin_property;
 	/**
 	 * @tv_bottom_margin_property: Optional TV property to set the right
-	 * margin.
+	 * margin (expressed in pixels).
 	 */
 	struct drm_property *tv_bottom_margin_property;
 	/**


@@ -26,9 +26,9 @@
 #ifndef __DRM_SYNCOBJ_H__
 #define __DRM_SYNCOBJ_H__

-#include "linux/dma-fence.h"
+#include <linux/dma-fence.h>

-struct drm_syncobj_cb;
+struct drm_file;

 /**
 * struct drm_syncobj - sync object.
@@ -62,25 +62,6 @@ struct drm_syncobj {
 	struct file *file;
 };

-typedef void (*drm_syncobj_func_t)(struct drm_syncobj *syncobj,
-				   struct drm_syncobj_cb *cb);
-
-/**
- * struct drm_syncobj_cb - callback for drm_syncobj_add_callback
- * @node: used by drm_syncob_add_callback to append this struct to
- *	  &drm_syncobj.cb_list
- * @func: drm_syncobj_func_t to call
- *
- * This struct will be initialized by drm_syncobj_add_callback, additional
- * data can be passed along by embedding drm_syncobj_cb in another struct.
- * The callback will get called the next time drm_syncobj_replace_fence is
- * called.
- */
-struct drm_syncobj_cb {
-	struct list_head node;
-	drm_syncobj_func_t func;
-};
-
 void drm_syncobj_free(struct kref *kref);

 /**


@@ -4,6 +4,9 @@
 #ifndef _DRM_INTEL_GTT_H
 #define _DRM_INTEL_GTT_H

+#include <linux/agp_backend.h>
+#include <linux/kernel.h>
+
 void intel_gtt_get(u64 *gtt_total,
 		   phys_addr_t *mappable_base,
 		   resource_size_t *mappable_end);


@@ -77,7 +77,7 @@ struct dma_fence {
 	struct list_head cb_list;
 	spinlock_t *lock;
 	u64 context;
-	unsigned seqno;
+	u64 seqno;
 	unsigned long flags;
 	ktime_t timestamp;
 	int error;
@@ -244,7 +244,7 @@ struct dma_fence_ops {
 };

 void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
-		    spinlock_t *lock, u64 context, unsigned seqno);
+		    spinlock_t *lock, u64 context, u64 seqno);

 void dma_fence_release(struct kref *kref);
 void dma_fence_free(struct dma_fence *fence);
@@ -414,9 +414,17 @@ dma_fence_is_signaled(struct dma_fence *fence)
 * Returns true if f1 is chronologically later than f2. Both fences must be
 * from the same context, since a seqno is not common across contexts.
 */
-static inline bool __dma_fence_is_later(u32 f1, u32 f2)
+static inline bool __dma_fence_is_later(u64 f1, u64 f2)
 {
-	return (int)(f1 - f2) > 0;
+	/* This is for backward compatibility with drivers which can only
+	 * handle 32bit sequence numbers. Use a 64bit compare when any of the
+	 * higher bits are non-zero, otherwise use a 32bit compare with wrap
+	 * around handling.
+	 */
+	if (upper_32_bits(f1) || upper_32_bits(f2))
+		return f1 > f2;
+
+	return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
 }

 /**
@@ -548,21 +556,21 @@ u64 dma_fence_context_alloc(unsigned num);
 	do { \
 		struct dma_fence *__ff = (f); \
 		if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \
-			pr_info("f %llu#%u: " fmt, \
+			pr_info("f %llu#%llu: " fmt, \
 				__ff->context, __ff->seqno, ##args); \
 	} while (0)

 #define DMA_FENCE_WARN(f, fmt, args...) \
 	do { \
 		struct dma_fence *__ff = (f); \
-		pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
+		pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \
 			##args); \
 	} while (0)

 #define DMA_FENCE_ERR(f, fmt, args...) \
 	do { \
 		struct dma_fence *__ff = (f); \
-		pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
+		pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \
 			##args); \
 	} while (0)
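
A standalone illustration (plain C, outside the kernel) of the __dma_fence_is_later() semantics above: 32-bit wraparound ordering is preserved as long as both seqnos stay within the lower 32 bits, and a plain 64-bit compare takes over otherwise:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace re-statement of the __dma_fence_is_later() logic. */
static bool is_later(uint64_t f1, uint64_t f2)
{
	if ((f1 >> 32) || (f2 >> 32))
		return f1 > f2;

	/* 32-bit compare with wraparound, as before the conversion. */
	return (int32_t)((uint32_t)f1 - (uint32_t)f2) > 0;
}

int main(void)
{
	/* Wraparound: seqno 1 is later than 0xffffffff in 32-bit space. */
	printf("%d\n", is_later(1, 0xffffffffu));	/* prints 1 */
	/* Once the upper bits are in play, plain ordering applies. */
	printf("%d\n", is_later(1, 0x100000000ull));	/* prints 0 */
	return 0;
}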


@@ -52,6 +52,14 @@ extern "C" {
 *
 * This asks the kernel to have the GPU execute an optional binner
 * command list, and a render command list.
+ *
+ * The L1T, slice, L2C, L2T, and GCA caches will be flushed before
+ * each CL executes.  The VCD cache should be flushed (if necessary)
+ * by the submitted CLs.  The TLB writes are guaranteed to have been
+ * flushed by the time the render done IRQ happens, which is the
+ * trigger for out_sync.  Any dirtying of cachelines by the job (only
+ * possible using TMU writes) must be flushed by the caller using the
+ * CL's cache flush commands.
 */
struct drm_v3d_submit_cl {
	/* Pointer to the binner command list.