From: Dave Airlie <airlied@xxxxxxxxxx> This is step one towards allocating these on demand for legacy devices. First group all the legacy struct members into their own structure and include it into the main drm driver structure directly. Signed-off-by: Dave Airlie <airlied@xxxxxxxxxx> --- drivers/gpu/drm/ati_pcigart.c | 4 +- drivers/gpu/drm/drm_bufs.c | 144 +++++++++++++++++----------------- drivers/gpu/drm/drm_context.c | 64 +++++++-------- drivers/gpu/drm/drm_dma.c | 18 ++--- drivers/gpu/drm/drm_drv.c | 62 ++++++++++----- drivers/gpu/drm/drm_fops.c | 10 +-- drivers/gpu/drm/drm_info.c | 4 +- drivers/gpu/drm/drm_ioctl.c | 2 +- drivers/gpu/drm/drm_lock.c | 20 ++--- drivers/gpu/drm/drm_scatter.c | 14 ++-- drivers/gpu/drm/drm_vm.c | 14 ++-- drivers/gpu/drm/i810/i810_dma.c | 22 +++--- drivers/gpu/drm/mga/mga_dma.c | 42 +++++----- drivers/gpu/drm/mga/mga_state.c | 6 +- drivers/gpu/drm/r128/r128_cce.c | 34 ++++---- drivers/gpu/drm/r128/r128_state.c | 18 ++--- drivers/gpu/drm/radeon/r300_cmdbuf.c | 2 +- drivers/gpu/drm/radeon/r600_blit.c | 6 +- drivers/gpu/drm/radeon/r600_cp.c | 44 +++++------ drivers/gpu/drm/radeon/radeon_cp.c | 42 +++++----- drivers/gpu/drm/radeon/radeon_state.c | 16 ++-- drivers/gpu/drm/savage/savage_bci.c | 26 +++--- drivers/gpu/drm/savage/savage_state.c | 2 +- drivers/gpu/drm/sis/sis_mm.c | 2 +- drivers/gpu/drm/via/via_mm.c | 2 +- drivers/gpu/drm/via/via_verifier.c | 2 +- include/drm/drmP.h | 93 +++++++++++++--------- include/drm/drm_legacy.h | 2 +- 28 files changed, 379 insertions(+), 338 deletions(-) diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c index 6c4d4b6..6b0a140 100644 --- a/drivers/gpu/drm/ati_pcigart.c +++ b/drivers/gpu/drm/ati_pcigart.c @@ -58,7 +58,7 @@ static void drm_ati_free_pcigart_table(struct drm_device *dev, int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info) { - struct drm_sg_mem *entry = dev->sg; + struct drm_sg_mem *entry = dev->legacy.sg; unsigned long pages; 
int i; int max_pages; @@ -98,7 +98,7 @@ EXPORT_SYMBOL(drm_ati_pcigart_cleanup); int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info) { struct drm_local_map *map = &gart_info->mapping; - struct drm_sg_mem *entry = dev->sg; + struct drm_sg_mem *entry = dev->legacy.sg; void *address = NULL; unsigned long pages; u32 *pci_gart = NULL, page_base, gart_idx; diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 569064a..6bfff25 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -40,7 +40,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, struct drm_local_map *map) { struct drm_map_list *entry; - list_for_each_entry(entry, &dev->maplist, head) { + list_for_each_entry(entry, &dev->legacy.maplist, head) { /* * Because the kernel-userspace ABI is fixed at a 32-bit offset * while PCI resources may live above that, we only compare the @@ -90,7 +90,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, if (!use_hashed_handle) { int ret; hash->key = user_token >> PAGE_SHIFT; - ret = drm_ht_insert_item(&dev->map_hash, hash); + ret = drm_ht_insert_item(&dev->legacy.map_hash, hash); if (ret != -EINVAL) return ret; } @@ -116,7 +116,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL)); } - return drm_ht_just_insert_please(&dev->map_hash, hash, + return drm_ht_just_insert_please(&dev->legacy.map_hash, hash, user_token, 32 - PAGE_SHIFT - 3, shift, add); } @@ -126,7 +126,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, * non-root process. * * Adjusts the memory offset to its absolute value according to the mapping - * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where + * type. Adds the map to the map list drm_device::legacy.maplist. Adds MTRR's where * applicable and if supported by the kernel. 
*/ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, @@ -250,7 +250,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, kfree(map); return -EBUSY; } - dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ + dev->legacy.sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ } break; case _DRM_AGP: { @@ -300,11 +300,11 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, break; } case _DRM_SCATTER_GATHER: - if (!dev->sg) { + if (!dev->legacy.sg) { kfree(map); return -EINVAL; } - map->offset += (unsigned long)dev->sg->virtual; + map->offset += (unsigned long)dev->legacy.sg->virtual; break; case _DRM_CONSISTENT: /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G, @@ -335,7 +335,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset, list->map = map; mutex_lock(&dev->struct_mutex); - list_add(&list->head, &dev->maplist); + list_add(&list->head, &dev->legacy.maplist); /* Assign a 32-bit handle */ /* We do it here so that dev->struct_mutex protects the increment */ @@ -434,11 +434,11 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) struct drm_master *master; /* Find the list entry for the map and remove it */ - list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { + list_for_each_entry_safe(r_list, list_t, &dev->legacy.maplist, head) { if (r_list->map == map) { master = r_list->master; list_del(&r_list->head); - drm_ht_remove_key(&dev->map_hash, + drm_ht_remove_key(&dev->legacy.map_hash, r_list->user_token >> PAGE_SHIFT); kfree(r_list); found = 1; @@ -459,8 +459,8 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) case _DRM_SHM: vfree(map->handle); if (master) { - if (dev->sigdata.lock == master->lock.hw_lock) - dev->sigdata.lock = NULL; + if (dev->legacy.sigdata.lock == master->lock.hw_lock) + dev->legacy.sigdata.lock = 
NULL; master->lock.hw_lock = NULL; /* SHM removed */ master->lock.file_priv = NULL; wake_up_interruptible_all(&master->lock.lock_queue); @@ -518,7 +518,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data, int ret; mutex_lock(&dev->struct_mutex); - list_for_each_entry(r_list, &dev->maplist, head) { + list_for_each_entry(r_list, &dev->legacy.maplist, head) { if (r_list->map && r_list->user_token == (unsigned long)request->handle && r_list->map->flags & _DRM_REMOVABLE) { @@ -530,7 +530,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data, /* List has wrapped around to the head pointer, or its empty we didn't * find anything. */ - if (list_empty(&dev->maplist) || !map) { + if (list_empty(&dev->legacy.maplist) || !map) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } @@ -597,7 +597,7 @@ static void drm_cleanup_buf_error(struct drm_device * dev, int drm_legacy_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf_entry *entry; struct drm_agp_mem *agp_entry; struct drm_buf *buf; @@ -652,32 +652,32 @@ int drm_legacy_addbufs_agp(struct drm_device *dev, DRM_DEBUG("zone invalid\n"); return -EINVAL; } - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); + spin_lock(&dev->legacy.buf_lock); + if (dev->legacy.buf_use) { + spin_unlock(&dev->legacy.buf_lock); return -EBUSY; } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); + atomic_inc(&dev->legacy.buf_alloc); + spin_unlock(&dev->legacy.buf_lock); mutex_lock(&dev->struct_mutex); entry = &dma->bufs[order]; if (entry->buf_count) { mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; /* May only call once for each order */ } if (count < 0 || count > 4096) { mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -EINVAL; } 
entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL); if (!entry->buflist) { mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } @@ -708,7 +708,7 @@ int drm_legacy_addbufs_agp(struct drm_device *dev, entry->buf_count = count; drm_cleanup_buf_error(dev, entry); mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } @@ -728,7 +728,7 @@ int drm_legacy_addbufs_agp(struct drm_device *dev, /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } dma->buflist = temp_buflist; @@ -752,7 +752,7 @@ int drm_legacy_addbufs_agp(struct drm_device *dev, dma->flags = _DRM_DMA_USE_AGP; - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return 0; } EXPORT_SYMBOL(drm_legacy_addbufs_agp); @@ -761,7 +761,7 @@ EXPORT_SYMBOL(drm_legacy_addbufs_agp); int drm_legacy_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; int count; int order; int size; @@ -802,32 +802,32 @@ int drm_legacy_addbufs_pci(struct drm_device *dev, page_order = order - PAGE_SHIFT > 0 ? 
order - PAGE_SHIFT : 0; total = PAGE_SIZE << page_order; - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); + spin_lock(&dev->legacy.buf_lock); + if (dev->legacy.buf_use) { + spin_unlock(&dev->legacy.buf_lock); return -EBUSY; } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); + atomic_inc(&dev->legacy.buf_alloc); + spin_unlock(&dev->legacy.buf_lock); mutex_lock(&dev->struct_mutex); entry = &dma->bufs[order]; if (entry->buf_count) { mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; /* May only call once for each order */ } if (count < 0 || count > 4096) { mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -EINVAL; } entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL); if (!entry->buflist) { mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } @@ -835,7 +835,7 @@ int drm_legacy_addbufs_pci(struct drm_device *dev, if (!entry->seglist) { kfree(entry->buflist); mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } @@ -848,7 +848,7 @@ int drm_legacy_addbufs_pci(struct drm_device *dev, kfree(entry->buflist); kfree(entry->seglist); mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } memcpy(temp_pagelist, @@ -872,7 +872,7 @@ int drm_legacy_addbufs_pci(struct drm_device *dev, drm_cleanup_buf_error(dev, entry); kfree(temp_pagelist); mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } entry->seglist[entry->seg_count++] = dmah; @@ -909,7 +909,7 @@ int drm_legacy_addbufs_pci(struct drm_device *dev, drm_cleanup_buf_error(dev, entry); kfree(temp_pagelist); mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + 
atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } @@ -927,7 +927,7 @@ int drm_legacy_addbufs_pci(struct drm_device *dev, drm_cleanup_buf_error(dev, entry); kfree(temp_pagelist); mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } dma->buflist = temp_buflist; @@ -957,7 +957,7 @@ int drm_legacy_addbufs_pci(struct drm_device *dev, if (request->flags & _DRM_PCI_BUFFER_RO) dma->flags = _DRM_DMA_USE_PCI_RO; - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return 0; } @@ -966,7 +966,7 @@ EXPORT_SYMBOL(drm_legacy_addbufs_pci); static int drm_legacy_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf_entry *entry; struct drm_buf *buf; unsigned long offset; @@ -1013,25 +1013,25 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev, if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; - spin_lock(&dev->buf_lock); - if (dev->buf_use) { - spin_unlock(&dev->buf_lock); + spin_lock(&dev->legacy.buf_lock); + if (dev->legacy.buf_use) { + spin_unlock(&dev->legacy.buf_lock); return -EBUSY; } - atomic_inc(&dev->buf_alloc); - spin_unlock(&dev->buf_lock); + atomic_inc(&dev->legacy.buf_alloc); + spin_unlock(&dev->legacy.buf_lock); mutex_lock(&dev->struct_mutex); entry = &dma->bufs[order]; if (entry->buf_count) { mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; /* May only call once for each order */ } if (count < 0 || count > 4096) { mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -EINVAL; } @@ -1039,7 +1039,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev, GFP_KERNEL); if (!entry->buflist) { mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } @@ 
-1058,7 +1058,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev, buf->offset = (dma->byte_count + offset); buf->bus_address = agp_offset + offset; buf->address = (void *)(agp_offset + offset - + (unsigned long)dev->sg->virtual); + + (unsigned long)dev->legacy.sg->virtual); buf->next = NULL; buf->waiting = 0; buf->pending = 0; @@ -1071,7 +1071,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev, entry->buf_count = count; drm_cleanup_buf_error(dev, entry); mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } @@ -1091,7 +1091,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev, /* Free the entry because it isn't valid */ drm_cleanup_buf_error(dev, entry); mutex_unlock(&dev->struct_mutex); - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return -ENOMEM; } dma->buflist = temp_buflist; @@ -1115,7 +1115,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev, dma->flags = _DRM_DMA_USE_SG; - atomic_dec(&dev->buf_alloc); + atomic_dec(&dev->legacy.buf_alloc); return 0; } @@ -1173,14 +1173,14 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data, * \param arg pointer to a drm_buf_info structure. * \return zero on success or a negative number on failure. * - * Increments drm_device::buf_use while holding the drm_device::buf_lock + * Increments drm_device::legacy.buf_use while holding the drm_device::legacy.buf_lock * lock, preventing of allocating more buffers after this call. Information * about each requested buffer is then copied into user space. 
*/ int drm_legacy_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf_info *request = data; int i; int count; @@ -1194,13 +1194,13 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data, if (!dma) return -EINVAL; - spin_lock(&dev->buf_lock); - if (atomic_read(&dev->buf_alloc)) { - spin_unlock(&dev->buf_lock); + spin_lock(&dev->legacy.buf_lock); + if (atomic_read(&dev->legacy.buf_alloc)) { + spin_unlock(&dev->legacy.buf_lock); return -EBUSY; } - ++dev->buf_use; /* Can't allocate more after this call */ - spin_unlock(&dev->buf_lock); + ++dev->legacy.buf_use; /* Can't allocate more after this call */ + spin_unlock(&dev->legacy.buf_lock); for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) @@ -1261,7 +1261,7 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data, int drm_legacy_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf_desc *request = data; int order; struct drm_buf_entry *entry; @@ -1308,7 +1308,7 @@ int drm_legacy_markbufs(struct drm_device *dev, void *data, int drm_legacy_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf_free *request = data; int i; int idx; @@ -1361,7 +1361,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data, int drm_legacy_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; int retcode = 0; const int zero = 0; unsigned long virtual; @@ -1378,20 +1378,20 @@ int drm_legacy_mapbufs(struct drm_device *dev, void *data, if (!dma) return -EINVAL; - spin_lock(&dev->buf_lock); - if 
(atomic_read(&dev->buf_alloc)) { - spin_unlock(&dev->buf_lock); + spin_lock(&dev->legacy.buf_lock); + if (atomic_read(&dev->legacy.buf_alloc)) { + spin_unlock(&dev->legacy.buf_lock); return -EBUSY; } - dev->buf_use++; /* Can't allocate more after this call */ - spin_unlock(&dev->buf_lock); + dev->legacy.buf_use++; /* Can't allocate more after this call */ + spin_unlock(&dev->legacy.buf_lock); if (request->count >= dma->buf_count) { if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP)) || (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG))) { - struct drm_local_map *map = dev->agp_buffer_map; - unsigned long token = dev->agp_buffer_token; + struct drm_local_map *map = dev->legacy.agp_buffer_map; + unsigned long token = dev->legacy.agp_buffer_token; if (!map) { retcode = -EINVAL; @@ -1462,7 +1462,7 @@ struct drm_local_map *drm_legacy_getsarea(struct drm_device *dev) { struct drm_map_list *entry; - list_for_each_entry(entry, &dev->maplist, head) { + list_for_each_entry(entry, &dev->legacy.maplist, head) { if (entry->map && entry->map->type == _DRM_SHM && (entry->map->flags & _DRM_CONTAINS_LOCK)) { return entry->map; diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c index 9b23525..9ce051e 100644 --- a/drivers/gpu/drm/drm_context.c +++ b/drivers/gpu/drm/drm_context.c @@ -48,13 +48,13 @@ struct drm_ctx_list { * \param ctx_handle context handle. * * Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry - * in drm_device::ctx_idr, while holding the drm_device::struct_mutex + * in drm_device::legacy.ctx_idr, while holding the drm_device::struct_mutex * lock. */ void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle) { mutex_lock(&dev->struct_mutex); - idr_remove(&dev->ctx_idr, ctx_handle); + idr_remove(&dev->legacy.ctx_idr, ctx_handle); mutex_unlock(&dev->struct_mutex); } @@ -64,7 +64,7 @@ void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle) * \param dev DRM device. 
* \return (non-negative) context handle on success or a negative number on failure. * - * Allocate a new idr from drm_device::ctx_idr while holding the + * Allocate a new idr from drm_device::legacy.ctx_idr while holding the * drm_device::struct_mutex lock. */ static int drm_legacy_ctxbitmap_next(struct drm_device * dev) @@ -72,7 +72,7 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev) int ret; mutex_lock(&dev->struct_mutex); - ret = idr_alloc(&dev->ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0, + ret = idr_alloc(&dev->legacy.ctx_idr, NULL, DRM_RESERVED_CONTEXTS, 0, GFP_KERNEL); mutex_unlock(&dev->struct_mutex); return ret; @@ -83,11 +83,11 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev) * * \param dev DRM device. * - * Initialise the drm_device::ctx_idr + * Initialise the drm_device::legacy.ctx_idr */ int drm_legacy_ctxbitmap_init(struct drm_device * dev) { - idr_init(&dev->ctx_idr); + idr_init(&dev->legacy.ctx_idr); return 0; } @@ -102,7 +102,7 @@ int drm_legacy_ctxbitmap_init(struct drm_device * dev) void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev) { mutex_lock(&dev->struct_mutex); - idr_destroy(&dev->ctx_idr); + idr_destroy(&dev->legacy.ctx_idr); mutex_unlock(&dev->struct_mutex); } @@ -119,9 +119,9 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) { struct drm_ctx_list *pos, *tmp; - mutex_lock(&dev->ctxlist_mutex); + mutex_lock(&dev->legacy.ctxlist_mutex); - list_for_each_entry_safe(pos, tmp, &dev->ctxlist, head) { + list_for_each_entry_safe(pos, tmp, &dev->legacy.ctxlist, head) { if (pos->tag == file && pos->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) @@ -133,7 +133,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) } } - mutex_unlock(&dev->ctxlist_mutex); + mutex_unlock(&dev->legacy.ctxlist_mutex); } /*@}*/ @@ -151,7 +151,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) * \param arg user argument pointing to 
a drm_ctx_priv_map structure. * \return zero on success or a negative number on failure. * - * Gets the map from drm_device::ctx_idr with the handle specified and + * Gets the map from drm_device::legacy.ctx_idr with the handle specified and * returns its handle. */ int drm_legacy_getsareactx(struct drm_device *dev, void *data, @@ -163,14 +163,14 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); - map = idr_find(&dev->ctx_idr, request->ctx_id); + map = idr_find(&dev->legacy.ctx_idr, request->ctx_id); if (!map) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } request->handle = NULL; - list_for_each_entry(_entry, &dev->maplist, head) { + list_for_each_entry(_entry, &dev->legacy.maplist, head) { if (_entry->map == map) { request->handle = (void *)(unsigned long)_entry->user_token; @@ -196,7 +196,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data, * \return zero on success or a negative number on failure. * * Searches the mapping specified in \p arg and update the entry in - * drm_device::ctx_idr with it. + * drm_device::legacy.ctx_idr with it. */ int drm_legacy_setsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -206,7 +206,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data, struct drm_map_list *r_list = NULL; mutex_lock(&dev->struct_mutex); - list_for_each_entry(r_list, &dev->maplist, head) { + list_for_each_entry(r_list, &dev->legacy.maplist, head) { if (r_list->map && r_list->user_token == (unsigned long) request->handle) goto found; @@ -220,7 +220,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data, if (!map) goto bad; - if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) + if (IS_ERR(idr_replace(&dev->legacy.ctx_idr, map, request->ctx_id))) goto bad; mutex_unlock(&dev->struct_mutex); @@ -242,19 +242,19 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data, * \param new new context handle. 
* \return zero on success or a negative number on failure. * - * Attempt to set drm_device::context_flag. + * Attempt to set drm_device::legacy.context_flag. */ static int drm_context_switch(struct drm_device * dev, int old, int new) { - if (test_and_set_bit(0, &dev->context_flag)) { + if (test_and_set_bit(0, &dev->legacy.context_flag)) { DRM_ERROR("Reentering -- FIXME\n"); return -EBUSY; } DRM_DEBUG("Context switch from %d to %d\n", old, new); - if (new == dev->last_context) { - clear_bit(0, &dev->context_flag); + if (new == dev->legacy.last_context) { + clear_bit(0, &dev->legacy.context_flag); return 0; } @@ -268,14 +268,14 @@ static int drm_context_switch(struct drm_device * dev, int old, int new) * \param new new context handle. * \return zero on success or a negative number on failure. * - * Updates drm_device::last_context and drm_device::last_switch. Verifies the - * hardware lock is held, clears the drm_device::context_flag and wakes up + * Updates drm_device::legacy.last_context and drm_device::last_switch. Verifies the + * hardware lock is held, clears the drm_device::legacy.context_flag and wakes up * drm_device::context_wait. */ static int drm_context_switch_complete(struct drm_device *dev, struct drm_file *file_priv, int new) { - dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ + dev->legacy.last_context = new; /* PRE/POST: This is the _only_ writer. */ if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) { DRM_ERROR("Lock isn't held after context switch\n"); @@ -284,7 +284,7 @@ static int drm_context_switch_complete(struct drm_device *dev, /* If a context switch is ever initiated when the kernel holds the lock, release that lock here. 
*/ - clear_bit(0, &dev->context_flag); + clear_bit(0, &dev->legacy.context_flag); return 0; } @@ -357,9 +357,9 @@ int drm_legacy_addctx(struct drm_device *dev, void *data, ctx_entry->handle = ctx->handle; ctx_entry->tag = file_priv; - mutex_lock(&dev->ctxlist_mutex); - list_add(&ctx_entry->head, &dev->ctxlist); - mutex_unlock(&dev->ctxlist_mutex); + mutex_lock(&dev->legacy.ctxlist_mutex); + list_add(&ctx_entry->head, &dev->legacy.ctxlist); + mutex_unlock(&dev->legacy.ctxlist_mutex); return 0; } @@ -401,7 +401,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data, struct drm_ctx *ctx = data; DRM_DEBUG("%d\n", ctx->handle); - return drm_context_switch(dev, dev->last_context, ctx->handle); + return drm_context_switch(dev, dev->legacy.last_context, ctx->handle); } /** @@ -449,18 +449,18 @@ int drm_legacy_rmctx(struct drm_device *dev, void *data, drm_legacy_ctxbitmap_free(dev, ctx->handle); } - mutex_lock(&dev->ctxlist_mutex); - if (!list_empty(&dev->ctxlist)) { + mutex_lock(&dev->legacy.ctxlist_mutex); + if (!list_empty(&dev->legacy.ctxlist)) { struct drm_ctx_list *pos, *n; - list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { + list_for_each_entry_safe(pos, n, &dev->legacy.ctxlist, head) { if (pos->handle == ctx->handle) { list_del(&pos->head); kfree(pos); } } } - mutex_unlock(&dev->ctxlist_mutex); + mutex_unlock(&dev->legacy.ctxlist_mutex); return 0; } diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c index ea48180..10e67de 100644 --- a/drivers/gpu/drm/drm_dma.c +++ b/drivers/gpu/drm/drm_dma.c @@ -54,15 +54,15 @@ int drm_legacy_dma_setup(struct drm_device *dev) return 0; } - dev->buf_use = 0; - atomic_set(&dev->buf_alloc, 0); + dev->legacy.buf_use = 0; + atomic_set(&dev->legacy.buf_alloc, 0); - dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL); - if (!dev->dma) + dev->legacy.dma = kzalloc(sizeof(*dev->legacy.dma), GFP_KERNEL); + if (!dev->legacy.dma) return -ENOMEM; for (i = 0; i <= DRM_MAX_ORDER; i++) - memset(&dev->dma->bufs[i], 0, 
sizeof(dev->dma->bufs[0])); + memset(&dev->legacy.dma->bufs[i], 0, sizeof(dev->legacy.dma->bufs[0])); return 0; } @@ -77,7 +77,7 @@ int drm_legacy_dma_setup(struct drm_device *dev) */ void drm_legacy_dma_takedown(struct drm_device *dev) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; int i, j; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) || @@ -113,8 +113,8 @@ void drm_legacy_dma_takedown(struct drm_device *dev) kfree(dma->buflist); kfree(dma->pagelist); - kfree(dev->dma); - dev->dma = NULL; + kfree(dev->legacy.dma); + dev->legacy.dma = NULL; } /** @@ -146,7 +146,7 @@ void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf) void drm_legacy_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; int i; if (!dma) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8889f8e..5021c83 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -127,7 +127,7 @@ static void drm_master_destroy(struct kref *kref) if (dev->driver->master_destroy) dev->driver->master_destroy(dev, master); - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) { + list_for_each_entry_safe(r_list, list_temp, &dev->legacy.maplist, head) { if (r_list->master == master) { drm_legacy_rmmap_locked(dev, r_list->map); r_list = NULL; @@ -522,6 +522,37 @@ static void drm_fs_inode_free(struct inode *inode) } } +static void drm_legacy_device_release(struct drm_device *dev) +{ + drm_legacy_ctxbitmap_cleanup(dev); + drm_ht_remove(&dev->legacy.map_hash); +} + +static int drm_legacy_device_init(struct drm_device *dev) +{ + int ret; + + spin_lock_init(&dev->legacy.buf_lock); + mutex_init(&dev->legacy.ctxlist_mutex); + INIT_LIST_HEAD(&dev->legacy.maplist); + INIT_LIST_HEAD(&dev->legacy.ctxlist); + + ret = drm_ht_create(&dev->legacy.map_hash, 12); + if (ret) + return ret; + + ret = 
drm_legacy_ctxbitmap_init(dev); + if (ret) { + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); + goto err_ht; + } + + return 0; +err_ht: + drm_ht_remove(&dev->legacy.map_hash); + return ret; +} + /** * drm_dev_alloc - Allocate new DRM device * @driver: DRM driver to allocate device for @@ -552,17 +583,14 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, dev->driver = driver; INIT_LIST_HEAD(&dev->filelist); - INIT_LIST_HEAD(&dev->ctxlist); INIT_LIST_HEAD(&dev->vmalist); - INIT_LIST_HEAD(&dev->maplist); INIT_LIST_HEAD(&dev->vblank_event_list); - spin_lock_init(&dev->buf_lock); spin_lock_init(&dev->event_lock); mutex_init(&dev->struct_mutex); - mutex_init(&dev->ctxlist_mutex); mutex_init(&dev->master_mutex); + dev->anon_inode = drm_fs_inode_new(); if (IS_ERR(dev->anon_inode)) { ret = PTR_ERR(dev->anon_inode); @@ -586,29 +614,22 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver, if (ret) goto err_minors; - if (drm_ht_create(&dev->map_hash, 12)) + ret = drm_legacy_device_init(dev); + if (ret) goto err_minors; - ret = drm_legacy_ctxbitmap_init(dev); - if (ret) { - DRM_ERROR("Cannot allocate memory for context bitmap.\n"); - goto err_ht; - } - if (driver->driver_features & DRIVER_GEM) { ret = drm_gem_init(dev); if (ret) { DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n"); - goto err_ctxbitmap; + goto err_legacy; } } return dev; -err_ctxbitmap: - drm_legacy_ctxbitmap_cleanup(dev); -err_ht: - drm_ht_remove(&dev->map_hash); +err_legacy: + drm_legacy_device_release(dev); err_minors: drm_minor_free(dev, DRM_MINOR_LEGACY); drm_minor_free(dev, DRM_MINOR_RENDER); @@ -621,6 +642,7 @@ err_free: } EXPORT_SYMBOL(drm_dev_alloc); + static void drm_dev_release(struct kref *ref) { struct drm_device *dev = container_of(ref, struct drm_device, ref); @@ -628,8 +650,8 @@ static void drm_dev_release(struct kref *ref) if (dev->driver->driver_features & DRIVER_GEM) drm_gem_destroy(dev); - drm_legacy_ctxbitmap_cleanup(dev); - 
drm_ht_remove(&dev->map_hash); + drm_legacy_device_release(dev); + drm_fs_inode_free(dev->anon_inode); drm_minor_free(dev, DRM_MINOR_LEGACY); @@ -758,7 +780,7 @@ void drm_dev_unregister(struct drm_device *dev) drm_vblank_cleanup(dev); - list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head) + list_for_each_entry_safe(r_list, list_temp, &dev->legacy.maplist, head) drm_legacy_rmmap(dev, r_list->map); drm_minor_unregister(dev, DRM_MINOR_LEGACY); diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 3e66946..bd5d194 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -312,10 +312,10 @@ static void drm_legacy_dev_reinit(struct drm_device *dev) if (drm_core_check_feature(dev, DRIVER_MODESET)) return; - dev->sigdata.lock = NULL; + dev->legacy.sigdata.lock = NULL; - dev->context_flag = 0; - dev->last_context = 0; + dev->legacy.context_flag = 0; + dev->legacy.last_context = 0; dev->if_version = 0; } @@ -427,8 +427,8 @@ int drm_release(struct inode *inode, struct file *filp) */ mutex_lock(&dev->struct_mutex); if (master->lock.hw_lock) { - if (dev->sigdata.lock == master->lock.hw_lock) - dev->sigdata.lock = NULL; + if (dev->legacy.sigdata.lock == master->lock.hw_lock) + dev->legacy.sigdata.lock = NULL; master->lock.hw_lock = NULL; master->lock.file_priv = NULL; wake_up_interruptible_all(&master->lock.lock_queue); diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c index 51efebd..fb6a2be 100644 --- a/drivers/gpu/drm/drm_info.c +++ b/drivers/gpu/drm/drm_info.c @@ -86,7 +86,7 @@ int drm_vm_info(struct seq_file *m, void *data) mutex_lock(&dev->struct_mutex); seq_printf(m, "slot offset size type flags address mtrr\n\n"); i = 0; - list_for_each_entry(r_list, &dev->maplist, head) { + list_for_each_entry(r_list, &dev->legacy.maplist, head) { map = r_list->map; if (!map) continue; @@ -121,7 +121,7 @@ int drm_bufs_info(struct seq_file *m, void *data) int i, seg_pages; mutex_lock(&dev->struct_mutex); - dma = 
dev->dma; + dma = dev->legacy.dma; if (!dma) { mutex_unlock(&dev->struct_mutex); return 0; diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 00587a1..006f827 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -179,7 +179,7 @@ static int drm_getmap(struct drm_device *dev, void *data, i = 0; mutex_lock(&dev->struct_mutex); - list_for_each(list, &dev->maplist) { + list_for_each(list, &dev->legacy.maplist) { if (i == idx) { r_list = list_entry(list, struct drm_map_list, head); break; diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c index f861361..80fc5a5 100644 --- a/drivers/gpu/drm/drm_lock.c +++ b/drivers/gpu/drm/drm_lock.c @@ -115,14 +115,14 @@ int drm_legacy_lock(struct drm_device *dev, void *data, * really probably not the correct answer but lets us debug xkb * xserver for now */ if (!file_priv->is_master) { - sigemptyset(&dev->sigmask); - sigaddset(&dev->sigmask, SIGSTOP); - sigaddset(&dev->sigmask, SIGTSTP); - sigaddset(&dev->sigmask, SIGTTIN); - sigaddset(&dev->sigmask, SIGTTOU); - dev->sigdata.context = lock->context; - dev->sigdata.lock = master->lock.hw_lock; - block_all_signals(drm_notifier, dev, &dev->sigmask); + sigemptyset(&dev->legacy.sigmask); + sigaddset(&dev->legacy.sigmask, SIGSTOP); + sigaddset(&dev->legacy.sigmask, SIGTSTP); + sigaddset(&dev->legacy.sigmask, SIGTTIN); + sigaddset(&dev->legacy.sigmask, SIGTTOU); + dev->legacy.sigdata.context = lock->context; + dev->legacy.sigdata.lock = master->lock.hw_lock; + block_all_signals(drm_notifier, dev, &dev->legacy.sigmask); } if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) @@ -295,12 +295,12 @@ int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context) static int drm_notifier(void *priv) { struct drm_device *dev = priv; - struct drm_hw_lock *lock = dev->sigdata.lock; + struct drm_hw_lock *lock = dev->legacy.sigdata.lock; unsigned int old, new, prev; /* Allow signal delivery if lock isn't 
held */ if (!lock || !_DRM_LOCK_IS_HELD(lock->lock) - || _DRM_LOCKING_CONTEXT(lock->lock) != dev->sigdata.context) + || _DRM_LOCKING_CONTEXT(lock->lock) != dev->legacy.sigdata.context) return 1; /* Otherwise, set flag to force call to diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index 4f0f3b3..d253935 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -67,10 +67,10 @@ static void drm_sg_cleanup(struct drm_sg_mem * entry) void drm_legacy_sg_cleanup(struct drm_device *dev) { - if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg && + if (drm_core_check_feature(dev, DRIVER_SG) && dev->legacy.sg && !drm_core_check_feature(dev, DRIVER_MODESET)) { - drm_sg_cleanup(dev->sg); - dev->sg = NULL; + drm_sg_cleanup(dev->legacy.sg); + dev->legacy.sg = NULL; } } #ifdef _LP64 @@ -94,7 +94,7 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; - if (dev->sg) + if (dev->legacy.sg) return -EINVAL; entry = kzalloc(sizeof(*entry), GFP_KERNEL); @@ -146,7 +146,7 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data, request->handle = entry->handle; - dev->sg = entry; + dev->legacy.sg = entry; #if DEBUG_SCATTER /* Verify that each page points to its virtual address, and vice @@ -207,8 +207,8 @@ int drm_legacy_sg_free(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; - entry = dev->sg; - dev->sg = NULL; + entry = dev->legacy.sg; + dev->legacy.sg = NULL; if (!entry || entry->handle != request->handle) return -EINVAL; diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 4a2c328..db224b4 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -112,7 +112,7 @@ static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!dev->agp || !dev->agp->cant_use_aperture) goto vm_fault_error; - if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) + if 
(drm_ht_find_item(&dev->legacy.map_hash, vma->vm_pgoff, &hash)) goto vm_fault_error; r_list = drm_hash_entry(hash, struct drm_map_list, hash); @@ -244,7 +244,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) * we delete this mappings information. */ found_maps = 0; - list_for_each_entry(r_list, &dev->maplist, head) { + list_for_each_entry(r_list, &dev->legacy.maplist, head) { if (r_list->map == map) found_maps++; } @@ -290,7 +290,7 @@ static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_file *priv = vma->vm_file->private_data; struct drm_device *dev = priv->minor->dev; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; unsigned long offset; unsigned long page_nr; struct page *page; @@ -325,7 +325,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct drm_local_map *map = vma->vm_private_data; struct drm_file *priv = vma->vm_file->private_data; struct drm_device *dev = priv->minor->dev; - struct drm_sg_mem *entry = dev->sg; + struct drm_sg_mem *entry = dev->legacy.sg; unsigned long offset; unsigned long map_offset; unsigned long page_offset; @@ -337,7 +337,7 @@ static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf) return VM_FAULT_SIGBUS; /* Nothing allocated */ offset = (unsigned long)vmf->virtual_address - vma->vm_start; - map_offset = map->offset - (unsigned long)dev->sg->virtual; + map_offset = map->offset - (unsigned long)dev->legacy.sg->virtual; page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT); page = entry->pagelist[page_offset]; get_page(page); @@ -481,7 +481,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) unsigned long length = vma->vm_end - vma->vm_start; dev = priv->minor->dev; - dma = dev->dma; + dma = dev->legacy.dma; DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", vma->vm_start, vma->vm_end, vma->vm_pgoff); @@ -562,7 +562,7 @@ static int 
drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) ) return drm_mmap_dma(filp, vma); - if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) { + if (drm_ht_find_item(&dev->legacy.map_hash, vma->vm_pgoff, &hash)) { DRM_ERROR("Could not find map\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index d918567..c3cf326 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -47,7 +47,7 @@ static struct drm_buf *i810_freelist_get(struct drm_device * dev) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; int i; int used; @@ -198,7 +198,7 @@ static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d, static int i810_dma_cleanup(struct drm_device *dev) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private @@ -280,7 +280,7 @@ static void i810_kernel_lost_context(struct drm_device *dev) static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; int my_idx = 24; u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); int i; @@ -320,7 +320,7 @@ static int i810_dma_initialize(struct drm_device *dev, struct drm_map_list *r_list; memset(dev_priv, 0, sizeof(drm_i810_private_t)); - list_for_each_entry(r_list, &dev->maplist, head) { + list_for_each_entry(r_list, &dev->legacy.maplist, head) { if (r_list->map && r_list->map->type == _DRM_SHM && r_list->map->flags & _DRM_CONTAINS_LOCK) { @@ -341,9 +341,9 @@ static int i810_dma_initialize(struct drm_device *dev, DRM_ERROR("can not find mmio map!\n"); return -EINVAL; } - dev->agp_buffer_token = init->buffers_offset; - dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); - if 
(!dev->agp_buffer_map) { + dev->legacy.agp_buffer_token = init->buffers_offset; + dev->legacy.agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); + if (!dev->legacy.agp_buffer_map) { dev->dev_private = (void *)dev_priv; i810_dma_cleanup(dev); DRM_ERROR("can not find dma buffer map!\n"); @@ -851,7 +851,7 @@ static void i810_dma_quiescent(struct drm_device *dev) static int i810_flush_queue(struct drm_device *dev) { drm_i810_private_t *dev_priv = dev->dev_private; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; int i, ret = 0; RING_LOCALS; @@ -884,7 +884,7 @@ static int i810_flush_queue(struct drm_device *dev) void i810_driver_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; int i; if (!dma) @@ -924,7 +924,7 @@ static int i810_flush_ioctl(struct drm_device *dev, void *data, static int i810_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1087,7 +1087,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in static int i810_dma_mc(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c index 8cfa9cb..83c3d7a 100644 --- a/drivers/gpu/drm/mga/mga_dma.c +++ b/drivers/gpu/drm/mga/mga_dma.c @@ -243,7 +243,7 @@ static void mga_freelist_print(struct drm_device *dev) 
static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_freelist_t *entry; @@ -306,7 +306,7 @@ static void mga_freelist_cleanup(struct drm_device *dev) */ static void mga_freelist_reset(struct drm_device *dev) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; int i; @@ -526,7 +526,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev, offset += dma_bs->primary_size; err = drm_legacy_addmap(dev, offset, secondary_size, - _DRM_AGP, 0, &dev->agp_buffer_map); + _DRM_AGP, 0, &dev->legacy.agp_buffer_map); if (err) { DRM_ERROR("Unable to map secondary DMA region: %d\n", err); return err; @@ -548,14 +548,14 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev, struct drm_map_list *_entry; unsigned long agp_token = 0; - list_for_each_entry(_entry, &dev->maplist, head) { - if (_entry->map == dev->agp_buffer_map) + list_for_each_entry(_entry, &dev->legacy.maplist, head) { + if (_entry->map == dev->legacy.agp_buffer_map) agp_token = _entry->user_token; } if (!agp_token) return -EFAULT; - dev->agp_buffer_token = agp_token; + dev->legacy.agp_buffer_token = agp_token; } offset += secondary_size; @@ -568,13 +568,13 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev, drm_legacy_ioremap(dev_priv->warp, dev); drm_legacy_ioremap(dev_priv->primary, dev); - drm_legacy_ioremap(dev->agp_buffer_map, dev); + drm_legacy_ioremap(dev->legacy.agp_buffer_map, dev); if (!dev_priv->warp->handle || - !dev_priv->primary->handle || !dev->agp_buffer_map->handle) { + !dev_priv->primary->handle || !dev->legacy.agp_buffer_map->handle) { DRM_ERROR("failed to ioremap agp regions! 
(%p, %p, %p)\n", dev_priv->warp->handle, dev_priv->primary->handle, - dev->agp_buffer_map->handle); + dev->legacy.agp_buffer_map->handle); return -ENOMEM; } @@ -617,8 +617,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device *dev, int err; struct drm_buf_desc req; - if (dev->dma == NULL) { - DRM_ERROR("dev->dma is NULL\n"); + if (dev->legacy.dma == NULL) { + DRM_ERROR("dev->legacy.dma is NULL\n"); return -EFAULT; } @@ -841,17 +841,17 @@ static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init) DRM_ERROR("failed to find primary dma region!\n"); return -EINVAL; } - dev->agp_buffer_token = init->buffers_offset; - dev->agp_buffer_map = + dev->legacy.agp_buffer_token = init->buffers_offset; + dev->legacy.agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); - if (!dev->agp_buffer_map) { + if (!dev->legacy.agp_buffer_map) { DRM_ERROR("failed to find dma buffer region!\n"); return -EINVAL; } drm_legacy_ioremap(dev_priv->warp, dev); drm_legacy_ioremap(dev_priv->primary, dev); - drm_legacy_ioremap(dev->agp_buffer_map, dev); + drm_legacy_ioremap(dev->legacy.agp_buffer_map, dev); } dev_priv->sarea_priv = @@ -861,8 +861,8 @@ static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init) if (!dev_priv->warp->handle || !dev_priv->primary->handle || ((dev_priv->dma_access != 0) && - ((dev->agp_buffer_map == NULL) || - (dev->agp_buffer_map->handle == NULL)))) { + ((dev->legacy.agp_buffer_map == NULL) || + (dev->legacy.agp_buffer_map->handle == NULL)))) { DRM_ERROR("failed to ioremap agp regions!\n"); return -ENOMEM; } @@ -943,8 +943,8 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup) && (dev_priv->primary->type != _DRM_CONSISTENT)) drm_legacy_ioremapfree(dev_priv->primary, dev); - if (dev->agp_buffer_map != NULL) - drm_legacy_ioremapfree(dev->agp_buffer_map, dev); + if (dev->legacy.agp_buffer_map != NULL) + drm_legacy_ioremapfree(dev->legacy.agp_buffer_map, dev); if (dev_priv->used_new_dma_init) { #if __OS_HAS_AGP 
@@ -972,7 +972,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup) dev_priv->primary = NULL; dev_priv->sarea = NULL; dev_priv->sarea_priv = NULL; - dev->agp_buffer_map = NULL; + dev->legacy.agp_buffer_map = NULL; if (full_cleanup) { dev_priv->mmio = NULL; @@ -1091,7 +1091,7 @@ static int mga_dma_get_buffers(struct drm_device *dev, int mga_dma_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; struct drm_dma *d = data; int ret = 0; diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index 792f924..e6ec73c 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c @@ -872,7 +872,7 @@ static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *fil static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_vertex_t *vertex = data; @@ -907,7 +907,7 @@ static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *f static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_mga_private_t *dev_priv = dev->dev_private; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_mga_buf_priv_t *buf_priv; drm_mga_indices_t *indices = data; @@ -941,7 +941,7 @@ static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file * static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; drm_mga_private_t *dev_priv = dev->dev_private; struct 
drm_buf *buf; drm_mga_buf_priv_t *buf_priv; diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c index 2c45ac9..fe1c310 100644 --- a/drivers/gpu/drm/r128/r128_cce.c +++ b/drivers/gpu/drm/r128/r128_cce.c @@ -317,7 +317,7 @@ static void r128_cce_init_ring_buffer(struct drm_device *dev, else #endif ring_start = dev_priv->cce_ring->offset - - (unsigned long)dev->sg->virtual; + (unsigned long)dev->legacy.sg->virtual; R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET); @@ -357,7 +357,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) dev_priv->is_pci = init->is_pci; - if (dev_priv->is_pci && !dev->sg) { + if (dev_priv->is_pci && !dev->legacy.sg) { DRM_ERROR("PCI GART memory not allocated!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); @@ -481,9 +481,9 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) r128_do_cleanup_cce(dev); return -EINVAL; } - dev->agp_buffer_token = init->buffers_offset; - dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); - if (!dev->agp_buffer_map) { + dev->legacy.agp_buffer_token = init->buffers_offset; + dev->legacy.agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); + if (!dev->legacy.agp_buffer_map) { DRM_ERROR("could not find dma buffer region!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); @@ -509,10 +509,10 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) if (!dev_priv->is_pci) { drm_legacy_ioremap_wc(dev_priv->cce_ring, dev); drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); - drm_legacy_ioremap_wc(dev->agp_buffer_map, dev); + drm_legacy_ioremap_wc(dev->legacy.agp_buffer_map, dev); if (!dev_priv->cce_ring->handle || !dev_priv->ring_rptr->handle || - !dev->agp_buffer_map->handle) { + !dev->legacy.agp_buffer_map->handle) { DRM_ERROR("Could not ioremap agp regions!\n"); dev->dev_private = (void *)dev_priv; r128_do_cleanup_cce(dev); @@ -525,8 +525,8 @@ 
static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) (void *)(unsigned long)dev_priv->cce_ring->offset; dev_priv->ring_rptr->handle = (void *)(unsigned long)dev_priv->ring_rptr->offset; - dev->agp_buffer_map->handle = - (void *)(unsigned long)dev->agp_buffer_map->offset; + dev->legacy.agp_buffer_map->handle = + (void *)(unsigned long)dev->legacy.agp_buffer_map->offset; } #if __OS_HAS_AGP @@ -534,7 +534,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init) dev_priv->cce_buffers_offset = dev->agp->base; else #endif - dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual; + dev_priv->cce_buffers_offset = (unsigned long)dev->legacy.sg->virtual; dev_priv->ring.start = (u32 *) dev_priv->cce_ring->handle; dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle @@ -606,9 +606,9 @@ int r128_do_cleanup_cce(struct drm_device *dev) drm_legacy_ioremapfree(dev_priv->cce_ring, dev); if (dev_priv->ring_rptr != NULL) drm_legacy_ioremapfree(dev_priv->ring_rptr, dev); - if (dev->agp_buffer_map != NULL) { - drm_legacy_ioremapfree(dev->agp_buffer_map, dev); - dev->agp_buffer_map = NULL; + if (dev->legacy.agp_buffer_map != NULL) { + drm_legacy_ioremapfree(dev->legacy.agp_buffer_map, dev); + dev->legacy.agp_buffer_map = NULL; } } else #endif @@ -764,7 +764,7 @@ int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_pr #if 0 static int r128_freelist_init(struct drm_device *dev) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; drm_r128_private_t *dev_priv = dev->dev_private; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; @@ -809,7 +809,7 @@ static int r128_freelist_init(struct drm_device *dev) static struct drm_buf *r128_freelist_get(struct drm_device * dev) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_buf_priv_t *buf_priv; struct drm_buf *buf; @@ -847,7 +847,7 
@@ static struct drm_buf *r128_freelist_get(struct drm_device * dev) void r128_freelist_reset(struct drm_device *dev) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; int i; for (i = 0; i < dma->buf_count; i++) { @@ -906,7 +906,7 @@ static int r128_cce_get_buffers(struct drm_device *dev, int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; int ret = 0; struct drm_dma *d = data; diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c index 575e986..457883f 100644 --- a/drivers/gpu/drm/r128/r128_state.c +++ b/drivers/gpu/drm/r128/r128_state.c @@ -648,7 +648,7 @@ static void r128_cce_dispatch_indirect(struct drm_device *dev, */ if (dwords & 1) { u32 *data = (u32 *) - ((char *)dev->agp_buffer_map->handle + ((char *)dev->legacy.agp_buffer_map->handle + buf->offset + start); data[dwords++] = cpu_to_le32(R128_CCE_PACKET2); } @@ -693,7 +693,7 @@ static void r128_cce_dispatch_indices(struct drm_device *dev, drm_r128_buf_priv_t *buf_priv = buf->dev_private; drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; int format = sarea_priv->vc_format; - int offset = dev->agp_buffer_map->offset - dev_priv->cce_buffers_offset; + int offset = dev->legacy.agp_buffer_map->offset - dev_priv->cce_buffers_offset; int prim = buf_priv->prim; u32 *data; int dwords; @@ -712,7 +712,7 @@ static void r128_cce_dispatch_indices(struct drm_device *dev, dwords = (end - start + 3) / sizeof(u32); - data = (u32 *) ((char *)dev->agp_buffer_map->handle + data = (u32 *) ((char *)dev->legacy.agp_buffer_map->handle + buf->offset + start); data[0] = cpu_to_le32(CCE_PACKET3(R128_3D_RNDR_GEN_INDX_PRIM, @@ -773,7 +773,7 @@ static int r128_cce_dispatch_blit(struct drm_device *dev, drm_r128_blit_t *blit) { drm_r128_private_t *dev_priv = dev->dev_private; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = 
dev->legacy.dma; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; u32 *data; @@ -836,7 +836,7 @@ static int r128_cce_dispatch_blit(struct drm_device *dev, dwords = (blit->width * blit->height) >> dword_shift; - data = (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); + data = (u32 *) ((char *)dev->legacy.agp_buffer_map->handle + buf->offset); data[0] = cpu_to_le32(CCE_PACKET3(R128_CNTL_HOSTDATA_BLT, dwords + 6)); data[1] = cpu_to_le32((R128_GMC_DST_PITCH_OFFSET_CNTL | @@ -1318,7 +1318,7 @@ static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *fi static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_vertex_t *vertex = data; @@ -1370,7 +1370,7 @@ static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file * static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_indices_t *elts = data; @@ -1434,7 +1434,7 @@ static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; drm_r128_private_t *dev_priv = dev->dev_private; drm_r128_blit_t *blit = data; int ret; @@ -1516,7 +1516,7 @@ static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_r128_private_t *dev_priv = dev->dev_private; - struct drm_device_dma *dma = dev->dma; + struct 
drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_r128_buf_priv_t *buf_priv; drm_r128_indirect_t *indirect = data; diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index 9418e38..679dd00 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c @@ -1015,7 +1015,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev, { drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf = NULL; int emit_dispatch_age = 0; int ret = 0; diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index daf7572..adfc206 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c @@ -165,8 +165,8 @@ set_shaders(struct drm_device *dev) DRM_DEBUG("\n"); /* load shaders */ - vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset); - ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); + vs = (u32 *) ((char *)dev->legacy.agp_buffer_map->handle + dev_priv->blit_vb->offset); + ps = (u32 *) ((char *)dev->legacy.agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); for (i = 0; i < r6xx_vs_size; i++) vs[i] = cpu_to_le32(r6xx_vs[i]); @@ -543,7 +543,7 @@ static void r600_nomm_put_vb(struct drm_device *dev) static void *r600_nomm_get_vb_ptr(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; - return (((char *)dev->agp_buffer_map->handle + + return (((char *)dev->legacy.agp_buffer_map->handle + dev_priv->blit_vb->offset + dev_priv->blit_vb->used)); } diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c index 09e3f39..ad1f3ae 100644 --- a/drivers/gpu/drm/radeon/r600_cp.c +++ b/drivers/gpu/drm/radeon/r600_cp.c @@ -150,7 +150,7 @@ static int 
r600_do_wait_for_idle(drm_radeon_private_t *dev_priv) void r600_page_table_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info) { - struct drm_sg_mem *entry = dev->sg; + struct drm_sg_mem *entry = dev->legacy.sg; int max_pages; int pages; int i; @@ -180,7 +180,7 @@ int r600_page_table_init(struct drm_device *dev) drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_ati_pcigart_info *gart_info = &dev_priv->gart_info; struct drm_local_map *map = &gart_info->mapping; - struct drm_sg_mem *entry = dev->sg; + struct drm_sg_mem *entry = dev->legacy.sg; int ret = 0; int i, j; int pages; @@ -1846,7 +1846,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, #endif { rptr_addr = dev_priv->ring_rptr->offset - - ((unsigned long) dev->sg->virtual) + - ((unsigned long) dev->legacy.sg->virtual) + dev_priv->gart_vm_start; } RADEON_WRITE(R600_CP_RB_RPTR_ADDR, (rptr_addr & 0xfffffffc)); @@ -1880,7 +1880,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev, } else #endif ring_start = (dev_priv->cp_ring->offset - - (unsigned long)dev->sg->virtual + - (unsigned long)dev->legacy.sg->virtual + dev_priv->gart_vm_start); RADEON_WRITE(R600_CP_RB_BASE, ring_start >> 8); @@ -1956,9 +1956,9 @@ int r600_do_cleanup_cp(struct drm_device *dev) drm_legacy_ioremapfree(dev_priv->ring_rptr, dev); dev_priv->ring_rptr = NULL; } - if (dev->agp_buffer_map != NULL) { - drm_legacy_ioremapfree(dev->agp_buffer_map, dev); - dev->agp_buffer_map = NULL; + if (dev->legacy.agp_buffer_map != NULL) { + drm_legacy_ioremapfree(dev->legacy.agp_buffer_map, dev); + dev->legacy.agp_buffer_map = NULL; } } else #endif @@ -2071,9 +2071,9 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, r600_do_cleanup_cp(dev); return -EINVAL; } - dev->agp_buffer_token = init->buffers_offset; - dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); - if (!dev->agp_buffer_map) { + dev->legacy.agp_buffer_token = init->buffers_offset; + 
dev->legacy.agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); + if (!dev->legacy.agp_buffer_map) { DRM_ERROR("could not find dma buffer region!\n"); r600_do_cleanup_cp(dev); return -EINVAL; @@ -2094,10 +2094,10 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, if (dev_priv->flags & RADEON_IS_AGP) { drm_legacy_ioremap_wc(dev_priv->cp_ring, dev); drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); - drm_legacy_ioremap_wc(dev->agp_buffer_map, dev); + drm_legacy_ioremap_wc(dev->legacy.agp_buffer_map, dev); if (!dev_priv->cp_ring->handle || !dev_priv->ring_rptr->handle || - !dev->agp_buffer_map->handle) { + !dev->legacy.agp_buffer_map->handle) { DRM_ERROR("could not find ioremap agp regions!\n"); r600_do_cleanup_cp(dev); return -EINVAL; @@ -2108,15 +2108,15 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, dev_priv->cp_ring->handle = (void *)(unsigned long)dev_priv->cp_ring->offset; dev_priv->ring_rptr->handle = (void *)(unsigned long)dev_priv->ring_rptr->offset; - dev->agp_buffer_map->handle = - (void *)(unsigned long)dev->agp_buffer_map->offset; + dev->legacy.agp_buffer_map->handle = + (void *)(unsigned long)dev->legacy.agp_buffer_map->offset; DRM_DEBUG("dev_priv->cp_ring->handle %p\n", dev_priv->cp_ring->handle); DRM_DEBUG("dev_priv->ring_rptr->handle %p\n", dev_priv->ring_rptr->handle); - DRM_DEBUG("dev->agp_buffer_map->handle %p\n", - dev->agp_buffer_map->handle); + DRM_DEBUG("dev->legacy.agp_buffer_map->handle %p\n", + dev->legacy.agp_buffer_map->handle); } dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 24; @@ -2178,13 +2178,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, #if __OS_HAS_AGP /* XXX */ if (dev_priv->flags & RADEON_IS_AGP) - dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset + dev_priv->gart_buffers_offset = (dev->legacy.agp_buffer_map->offset - dev->agp->base + dev_priv->gart_vm_start); else #endif - dev_priv->gart_buffers_offset = 
(dev->agp_buffer_map->offset - - (unsigned long)dev->sg->virtual + dev_priv->gart_buffers_offset = (dev->legacy.agp_buffer_map->offset + - (unsigned long)dev->legacy.sg->virtual + dev_priv->gart_vm_start); DRM_DEBUG("fb 0x%08x size %d\n", @@ -2397,7 +2397,7 @@ int r600_cp_dispatch_indirect(struct drm_device *dev, */ while (dwords & 0xf) { u32 *data = (u32 *) - ((char *)dev->agp_buffer_map->handle + ((char *)dev->legacy.agp_buffer_map->handle + buf->offset + start); data[dwords++] = RADEON_CP_PACKET2; } @@ -2526,7 +2526,7 @@ int r600_cp_dispatch_texture(struct drm_device *dev, /* Dispatch the indirect buffer. */ buffer = - (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); + (u32 *) ((char *)dev->legacy.agp_buffer_map->handle + buf->offset); if (copy_from_user(buffer, data, pass_size)) { DRM_ERROR("EFAULT on pad, %d bytes\n", pass_size); @@ -2634,7 +2634,7 @@ int r600_cs_legacy_ioctl(struct drm_device *dev, void *data, struct drm_file *fp DRM_ERROR("ib_get failed\n"); goto out; } - ib = dev->agp_buffer_map->handle + buf->offset; + ib = dev->legacy.agp_buffer_map->handle + buf->offset; /* now parse command stream */ r = r600_cs_legacy(dev, data, fpriv, family, ib, &l); if (r) { diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index ea134a7..8049e58 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -777,7 +777,7 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, } else #endif ring_start = (dev_priv->cp_ring->offset - - (unsigned long)dev->sg->virtual + - (unsigned long)dev->legacy.sg->virtual + dev_priv->gart_vm_start); RADEON_WRITE(RADEON_CP_RB_BASE, ring_start); @@ -801,7 +801,7 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev, { RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR, dev_priv->ring_rptr->offset - - ((unsigned long) dev->sg->virtual) + - ((unsigned long) dev->legacy.sg->virtual) + dev_priv->gart_vm_start); } @@ -1193,7 +1193,7 @@ static int 
radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, dev_priv->flags |= RADEON_IS_AGP; } - if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) { + if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->legacy.sg) { DRM_ERROR("PCI GART memory not allocated!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; @@ -1317,9 +1317,9 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, radeon_do_cleanup_cp(dev); return -EINVAL; } - dev->agp_buffer_token = init->buffers_offset; - dev->agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); - if (!dev->agp_buffer_map) { + dev->legacy.agp_buffer_token = init->buffers_offset; + dev->legacy.agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); + if (!dev->legacy.agp_buffer_map) { DRM_ERROR("could not find dma buffer region!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; @@ -1339,10 +1339,10 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, if (dev_priv->flags & RADEON_IS_AGP) { drm_legacy_ioremap_wc(dev_priv->cp_ring, dev); drm_legacy_ioremap_wc(dev_priv->ring_rptr, dev); - drm_legacy_ioremap_wc(dev->agp_buffer_map, dev); + drm_legacy_ioremap_wc(dev->legacy.agp_buffer_map, dev); if (!dev_priv->cp_ring->handle || !dev_priv->ring_rptr->handle || - !dev->agp_buffer_map->handle) { + !dev->legacy.agp_buffer_map->handle) { DRM_ERROR("could not find ioremap agp regions!\n"); radeon_do_cleanup_cp(dev); return -EINVAL; @@ -1354,15 +1354,15 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, (void *)(unsigned long)dev_priv->cp_ring->offset; dev_priv->ring_rptr->handle = (void *)(unsigned long)dev_priv->ring_rptr->offset; - dev->agp_buffer_map->handle = - (void *)(unsigned long)dev->agp_buffer_map->offset; + dev->legacy.agp_buffer_map->handle = + (void *)(unsigned long)dev->legacy.agp_buffer_map->offset; DRM_DEBUG("dev_priv->cp_ring->handle %p\n", dev_priv->cp_ring->handle); DRM_DEBUG("dev_priv->ring_rptr->handle %p\n", 
dev_priv->ring_rptr->handle); - DRM_DEBUG("dev->agp_buffer_map->handle %p\n", - dev->agp_buffer_map->handle); + DRM_DEBUG("dev->legacy.agp_buffer_map->handle %p\n", + dev->legacy.agp_buffer_map->handle); } dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16; @@ -1426,13 +1426,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init, #if __OS_HAS_AGP if (dev_priv->flags & RADEON_IS_AGP) - dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset + dev_priv->gart_buffers_offset = (dev->legacy.agp_buffer_map->offset - dev->agp->base + dev_priv->gart_vm_start); else #endif - dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset - - (unsigned long)dev->sg->virtual + dev_priv->gart_buffers_offset = (dev->legacy.agp_buffer_map->offset + - (unsigned long)dev->legacy.sg->virtual + dev_priv->gart_vm_start); DRM_DEBUG("dev_priv->gart_size %d\n", dev_priv->gart_size); @@ -1576,9 +1576,9 @@ static int radeon_do_cleanup_cp(struct drm_device * dev) drm_legacy_ioremapfree(dev_priv->ring_rptr, dev); dev_priv->ring_rptr = NULL; } - if (dev->agp_buffer_map != NULL) { - drm_legacy_ioremapfree(dev->agp_buffer_map, dev); - dev->agp_buffer_map = NULL; + if (dev->legacy.agp_buffer_map != NULL) { + drm_legacy_ioremapfree(dev->legacy.agp_buffer_map, dev); + dev->legacy.agp_buffer_map = NULL; } } else #endif @@ -1918,7 +1918,7 @@ int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_ struct drm_buf *radeon_freelist_get(struct drm_device * dev) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; drm_radeon_private_t *dev_priv = dev->dev_private; drm_radeon_buf_priv_t *buf_priv; struct drm_buf *buf; @@ -1958,7 +1958,7 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev) void radeon_freelist_reset(struct drm_device * dev) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; drm_radeon_private_t *dev_priv = dev->dev_private; 
int i; @@ -2034,7 +2034,7 @@ static int radeon_cp_get_buffers(struct drm_device *dev, int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; int ret = 0; struct drm_dma *d = data; diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 535403e..df7e98d6 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -1617,7 +1617,7 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev, */ if (dwords & 1) { u32 *data = (u32 *) - ((char *)dev->agp_buffer_map->handle + ((char *)dev->legacy.agp_buffer_map->handle + buf->offset + start); data[dwords++] = RADEON_CP_PACKET2; } @@ -1666,7 +1666,7 @@ static void radeon_cp_dispatch_indices(struct drm_device *dev, dwords = (prim->finish - prim->start + 3) / sizeof(u32); - data = (u32 *) ((char *)dev->agp_buffer_map->handle + + data = (u32 *) ((char *)dev->legacy.agp_buffer_map->handle + elt_buf->offset + prim->start); data[0] = CP_PACKET3(RADEON_3D_RNDR_GEN_INDX_PRIM, dwords - 2); @@ -1818,7 +1818,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev, /* Dispatch the indirect buffer. 
*/ buffer = - (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset); + (u32 *) ((char *)dev->legacy.agp_buffer_map->handle + buf->offset); dwords = size / 4; #define RADEON_COPY_MT(_buf, _data, _width) \ @@ -2257,7 +2257,7 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; drm_radeon_sarea_t *sarea_priv; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_radeon_vertex_t *vertex = data; drm_radeon_tcl_prim_t prim; @@ -2336,7 +2336,7 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; drm_radeon_sarea_t *sarea_priv; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_radeon_indices_t *elts = data; drm_radeon_tcl_prim_t prim; @@ -2474,7 +2474,7 @@ static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_radeon_indirect_t *indirect = data; RING_LOCALS; @@ -2543,7 +2543,7 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file drm_radeon_private_t *dev_priv = dev->dev_private; struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv; drm_radeon_sarea_t *sarea_priv; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_radeon_vertex2_t *vertex = data; int i; @@ -2868,7 +2868,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void 
*data, struct drm_file *file_priv) { drm_radeon_private_t *dev_priv = dev->dev_private; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf = NULL; drm_radeon_cmd_header_t stack_header; int idx; diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c index d47dff9..0e232a9 100644 --- a/drivers/gpu/drm/savage/savage_bci.c +++ b/drivers/gpu/drm/savage/savage_bci.c @@ -206,7 +206,7 @@ uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv, static int savage_freelist_init(struct drm_device * dev) { drm_savage_private_t *dev_priv = dev->dev_private; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *buf; drm_savage_buf_priv_t *entry; int i; @@ -716,16 +716,16 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) dev_priv->status = NULL; } if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { - dev->agp_buffer_token = init->buffers_offset; - dev->agp_buffer_map = drm_legacy_findmap(dev, + dev->legacy.agp_buffer_token = init->buffers_offset; + dev->legacy.agp_buffer_map = drm_legacy_findmap(dev, init->buffers_offset); - if (!dev->agp_buffer_map) { + if (!dev->legacy.agp_buffer_map) { DRM_ERROR("could not find DMA buffer region!\n"); savage_do_cleanup_bci(dev); return -EINVAL; } - drm_legacy_ioremap(dev->agp_buffer_map, dev); - if (!dev->agp_buffer_map->handle) { + drm_legacy_ioremap(dev->legacy.agp_buffer_map, dev); + if (!dev->legacy.agp_buffer_map->handle) { DRM_ERROR("failed to ioremap DMA buffer region!\n"); savage_do_cleanup_bci(dev); return -ENOMEM; @@ -750,7 +750,7 @@ static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) savage_do_cleanup_bci(dev); return -EINVAL; } - if (dev->dma && dev->dma->buflist) { + if (dev->legacy.dma && dev->legacy.dma->buflist) { DRM_ERROR("command and vertex DMA not supported " "at the same time.\n"); 
savage_do_cleanup_bci(dev); @@ -898,12 +898,12 @@ static int savage_do_cleanup_bci(struct drm_device * dev) drm_legacy_ioremapfree(dev_priv->cmd_dma, dev); if (dev_priv->dma_type == SAVAGE_DMA_AGP && - dev->agp_buffer_map && dev->agp_buffer_map->handle) { - drm_legacy_ioremapfree(dev->agp_buffer_map, dev); + dev->legacy.agp_buffer_map && dev->legacy.agp_buffer_map->handle) { + drm_legacy_ioremapfree(dev->legacy.agp_buffer_map, dev); /* make sure the next instance (which may be running * in PCI mode) doesn't try to use an old - * agp_buffer_map. */ - dev->agp_buffer_map = NULL; + * legacy.agp_buffer_map. */ + dev->legacy.agp_buffer_map = NULL; } kfree(dev_priv->dma_pages); @@ -1005,7 +1005,7 @@ static int savage_bci_get_buffers(struct drm_device *dev, int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_dma *d = data; int ret = 0; @@ -1038,7 +1038,7 @@ int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv) { - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; drm_savage_private_t *dev_priv = dev->dev_private; int release_idlelock = 0; int i; diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c index c01ad0a..060693e 100644 --- a/drivers/gpu/drm/savage/savage_state.c +++ b/drivers/gpu/drm/savage/savage_state.c @@ -956,7 +956,7 @@ static int savage_dispatch_draw(drm_savage_private_t * dev_priv, int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_savage_private_t *dev_priv = dev->dev_private; - struct drm_device_dma *dma = dev->dma; + struct drm_device_dma *dma = dev->legacy.dma; struct drm_buf *dmabuf; drm_savage_cmdbuf_t *cmdbuf = data; drm_savage_cmd_header_t *kcmd_addr = NULL; diff --git 
a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c index 93ad8a5..7f89157 100644 --- a/drivers/gpu/drm/sis/sis_mm.c +++ b/drivers/gpu/drm/sis/sis_mm.c @@ -226,7 +226,7 @@ static drm_local_map_t *sis_reg_init(struct drm_device *dev) struct drm_map_list *entry; drm_local_map_t *map; - list_for_each_entry(entry, &dev->maplist, head) { + list_for_each_entry(entry, &dev->legacy.maplist, head) { map = entry->map; if (!map) continue; diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c index 4f20742..25dc561 100644 --- a/drivers/gpu/drm/via/via_mm.c +++ b/drivers/gpu/drm/via/via_mm.c @@ -79,7 +79,7 @@ int via_final_context(struct drm_device *dev, int context) /* Linux specific until context tracking code gets ported to BSD */ /* Last context, perform cleanup */ - if (list_is_singular(&dev->ctxlist)) { + if (list_is_singular(&dev->legacy.ctxlist)) { DRM_DEBUG("Last Context\n"); drm_irq_uninstall(dev); via_cleanup_futex(dev_priv); diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c index 0677bbf..2df6396 100644 --- a/drivers/gpu/drm/via/via_verifier.c +++ b/drivers/gpu/drm/via/via_verifier.c @@ -262,7 +262,7 @@ static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq, return map; } - list_for_each_entry(r_list, &dev->maplist, head) { + list_for_each_entry(r_list, &dev->legacy.maplist, head) { map = r_list->map; if (!map) continue; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index d2c2b7f..231958d 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -682,6 +682,56 @@ struct drm_vblank_crtc { }; /** + * drm legacy device structure. This structure groups together + * objects required for legacy drivers. New drivers shouldn't be using + * anything in here. + */ +struct drm_legacy_device { + /** \name Usage Counters */ + /*@{ */ + spinlock_t buf_lock; /**< For drm_legacy_device::buf_use and a few other things. 
*/ + int buf_use; /**< Buffers in use -- cannot alloc */ + atomic_t buf_alloc; /**< Buffer allocation in progress */ + /*@} */ + + /** \name Memory management */ + /*@{ */ + struct list_head maplist; /**< Linked list of regions */ + struct drm_open_hash map_hash; /**< User token hash table for maps */ + /*@} */ + + /** \name Context handle management */ + /*@{ */ + struct list_head ctxlist; /**< Linked list of context handles */ + struct mutex ctxlist_mutex; /**< For ctxlist */ + + struct idr ctx_idr; + /*@} */ + + /** \name Context support */ + /*@{ */ + __volatile__ long context_flag; /**< Context swapping flag */ + int last_context; /**< Last current context */ + /*@} */ + + /** \name DMA support */ + /*@{ */ + struct drm_device_dma *dma; /**< Optional pointer for DMA support */ + /*@} */ + struct drm_local_map *agp_buffer_map; + unsigned int agp_buffer_token; + + struct drm_sg_mem *sg; /**< Scatter gather memory */ + + struct { + int context; + struct drm_hw_lock *lock; + } sigdata; + + sigset_t sigmask; +}; + +/** * DRM device structure. This structure represent a complete card that * may contain multiple heads. */ @@ -712,41 +762,17 @@ struct drm_device { /** \name Usage Counters */ /*@{ */ int open_count; /**< Outstanding files open, protected by drm_global_mutex. */ - spinlock_t buf_lock; /**< For drm_device::buf_use and a few other things. 
*/ - int buf_use; /**< Buffers in use -- cannot alloc */ - atomic_t buf_alloc; /**< Buffer allocation in progress */ /*@} */ struct list_head filelist; - /** \name Memory management */ - /*@{ */ - struct list_head maplist; /**< Linked list of regions */ - struct drm_open_hash map_hash; /**< User token hash table for maps */ - - /** \name Context handle management */ - /*@{ */ - struct list_head ctxlist; /**< Linked list of context handles */ - struct mutex ctxlist_mutex; /**< For ctxlist */ - - struct idr ctx_idr; - struct list_head vmalist; /**< List of vmas (for debugging) */ - /*@} */ - - /** \name DMA support */ - /*@{ */ - struct drm_device_dma *dma; /**< Optional pointer for DMA support */ - /*@} */ - - /** \name Context support */ + /** \name IRQ support */ /*@{ */ bool irq_enabled; /**< True if irq handler is enabled */ int irq; - __volatile__ long context_flag; /**< Context swapping flag */ - int last_context; /**< Last current context */ /*@} */ /** \name VBLANK IRQ support */ @@ -783,7 +809,6 @@ struct drm_device { */ struct list_head vblank_event_list; spinlock_t event_lock; - /*@} */ struct drm_agp_head *agp; /**< AGP data */ @@ -795,18 +820,7 @@ struct drm_device { struct platform_device *platformdev; /**< Platform device struture */ - struct drm_sg_mem *sg; /**< Scatter gather memory */ unsigned int num_crtcs; /**< Number of CRTCs on this device */ - sigset_t sigmask; - - struct { - int context; - struct drm_hw_lock *lock; - } sigdata; - - struct drm_local_map *agp_buffer_map; - unsigned int agp_buffer_token; - struct drm_mode_config mode_config; /**< Current mode config */ /** \name GEM information */ @@ -816,6 +830,11 @@ struct drm_device { struct drm_vma_offset_manager *vma_offset_manager; /*@} */ int switch_power_state; + + /** \name legacy driver information */ + /*@{ */ + struct drm_legacy_device legacy; + /*@} */ }; #define DRM_SWITCH_POWER_ON 0 diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h index 3e69803..02d3919 100644 --- 
a/include/drm/drm_legacy.h +++ b/include/drm/drm_legacy.h @@ -194,7 +194,7 @@ static __inline__ struct drm_local_map *drm_legacy_findmap(struct drm_device *de unsigned int token) { struct drm_map_list *_entry; - list_for_each_entry(_entry, &dev->maplist, head) + list_for_each_entry(_entry, &dev->legacy.maplist, head) if (_entry->user_token == token) return _entry->map; return NULL; -- 1.9.3 _______________________________________________ dri-devel mailing list dri-devel@xxxxxxxxxxxxxxxxxxxxx http://lists.freedesktop.org/mailman/listinfo/dri-devel