/**
 * \file drm_fops.c
 * File operations for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "drm_legacy.h"
#include "drm_internal.h"
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);

static int drm_open_helper(struct file *filp, struct drm_minor *minor);
static int drm_setup(struct drm_device *dev)
{
	int ret;

	if (dev->driver->firstopen &&
	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = dev->driver->firstopen(dev);
		if (ret != 0)
			return ret;
	}

	ret = drm_legacy_dma_setup(dev);
	if (ret < 0)
		return ret;

	return 0;
}
/**
 * Open file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * Searches the DRM device with the same minor number, calls open_helper(), and
 * increments the device open count. If the open count was previously zero,
 * i.e., this is the first time the device is opened, then calls setup().
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (!dev->open_count++)
		need_setup = 1;

	/* share address_space across all char-devs of a single device */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_setup(dev);
		if (retcode)
			goto err_undo;
	}
	return 0;

err_undo:
	dev->open_count--;
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);
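/*
 * Illustrative sketch (not part of this file; the device path is only an
 * example): userspace reaches drm_open() through an ordinary open() on the
 * DRM minor's device node, and close() on that fd ends up in drm_release().
 * Opens with O_EXCL are rejected with -EBUSY by drm_open_helper().
 *
 *	int fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
 *	if (fd < 0)
 *		perror("open");
 *	...
 *	close(fd);
 */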
/**
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;		/* No cmpxchg before v9 sparc. */
#endif
	return 1;
}
/**
 * Called whenever a process opens /dev/drm.
 *
 * \param filp file pointer.
 * \param minor acquired minor-object.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and adds it to the doubly linked list in \p dev.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	filp->private_data = priv;
	priv->filp = filp;
	priv->uid = current_euid();
	priv->pid = get_pid(task_pid(current));
	priv->minor = minor;

	/* for compatibility root is always authenticated */
	priv->authenticated = capable(CAP_SYS_ADMIN);
	priv->lock_count = 0;

	INIT_LIST_HEAD(&priv->lhead);
	INIT_LIST_HEAD(&priv->fbs);
	mutex_init(&priv->fbs_lock);
	INIT_LIST_HEAD(&priv->blobs);
	INIT_LIST_HEAD(&priv->event_list);
	init_waitqueue_head(&priv->event_wait);
	priv->event_space = 4096; /* set aside 4k for event buffer */

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_init_file_private(&priv->prime);
	if (dev->driver->open) {
		ret = dev->driver->open(dev, priv);
		if (ret < 0)
			goto out_prime_destroy;
	}
	/* if there is no current master make this fd it, but do not create
	 * any master object for render clients */
	mutex_lock(&dev->master_mutex);
	if (drm_is_primary_client(priv) && !priv->minor->master) {
		/* create a new master */
		priv->minor->master = drm_master_create(priv->minor);
		if (!priv->minor->master) {
			ret = -ENOMEM;
			goto out_close;
		}

		priv->is_master = 1;
		/* take another reference for the copy in the local file priv */
		priv->master = drm_master_get(priv->minor->master);
		priv->authenticated = 1;

		if (dev->driver->master_create) {
			ret = dev->driver->master_create(dev, priv->master);
			if (ret) {
				/* drop both references if this fails */
				drm_master_put(&priv->minor->master);
				drm_master_put(&priv->master);
				goto out_close;
			}
		}
		if (dev->driver->master_set) {
			ret = dev->driver->master_set(dev, priv, true);
			if (ret) {
				/* drop both references if this fails */
				drm_master_put(&priv->minor->master);
				drm_master_put(&priv->master);
				goto out_close;
			}
		}
	} else if (drm_is_primary_client(priv)) {
		/* get a reference to the master */
		priv->master = drm_master_get(priv->minor->master);
	}
	mutex_unlock(&dev->master_mutex);
	mutex_lock(&dev->struct_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->struct_mutex);

#ifdef __alpha__
	/*
	 * Default the hose
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;
		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
				struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif
	return 0;

out_close:
	mutex_unlock(&dev->master_mutex);
	if (dev->driver->postclose)
		dev->driver->postclose(dev, priv);
out_prime_destroy:
	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&priv->prime);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, priv);
	put_pid(priv->pid);
	kfree(priv);
	filp->private_data = NULL;

	return ret;
}
static void drm_master_release(struct drm_device *dev, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;

	if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
		DRM_DEBUG("File %p released, freeing lock for context %d\n",
			  filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
		drm_legacy_lock_free(&file_priv->master->lock,
				     _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
	}
}
static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	struct drm_pending_vblank_event *v, *vt;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Remove pending flips */
	list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
		if (v->base.file_priv == file_priv) {
			list_del(&v->base.link);
			drm_vblank_put(dev, v->pipe);
			v->base.destroy(&v->base);
		}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		e->destroy(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
/*
 * drm_legacy_dev_reinit
 *
 * Reinitializes a legacy/ums drm device in its lastclose function.
 */
static void drm_legacy_dev_reinit(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	dev->sigdata.lock = NULL;

	dev->context_flag = 0;
	dev->last_context = 0;
	dev->if_version = 0;
}
/**
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 *
 * \sa drm_device
 */
int drm_lastclose(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (dev->irq_enabled && !drm_core_check_feature(dev, DRIVER_MODESET))
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);

	drm_agp_clear(dev);

	drm_legacy_sg_cleanup(dev);
	drm_legacy_vma_flush(dev);
	drm_legacy_dma_takedown(dev);

	mutex_unlock(&dev->struct_mutex);

	drm_legacy_dev_reinit(dev);

	DRM_DEBUG("lastclose completed\n");
	return 0;
}
/**
 * Release file.
 *
 * \param inode device inode
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held then free it, and take it again for the kernel
 * context since it's necessary to reclaim buffers. Unlink the file private
 * data from its list and free it. Decreases the open count and if it reaches
 * zero calls drm_lastclose().
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;
	int retcode = 0;

	mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	mutex_lock(&dev->struct_mutex);
	list_del(&file_priv->lhead);
	if (file_priv->magic)
		idr_remove(&file_priv->master->magic_map, file_priv->magic);
	mutex_unlock(&dev->struct_mutex);
	if (dev->driver->preclose)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  task_pid_nr(current),
		  (long)old_encode_dev(file_priv->minor->kdev->devt),
		  dev->open_count);
	/* if the master has gone away we can't do anything with the lock */
	if (file_priv->minor->master)
		drm_master_release(dev, filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file_priv);

	drm_events_release(file_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file_priv);
		drm_property_destroy_user_blobs(dev, file_priv);
	}

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file_priv);

	drm_legacy_ctxbitmap_flush(dev, file_priv);
	mutex_lock(&dev->master_mutex);

	if (file_priv->is_master) {
		struct drm_master *master = file_priv->master;

		/*
		 * Since the master is disappearing, so is the
		 * possibility to lock.
		 */
		mutex_lock(&dev->struct_mutex);
		if (master->lock.hw_lock) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		mutex_unlock(&dev->struct_mutex);

		if (file_priv->minor->master == file_priv->master) {
			/* drop the reference held by the minor */
			if (dev->driver->master_drop)
				dev->driver->master_drop(dev, file_priv, true);
			drm_master_put(&file_priv->minor->master);
		}
	}

	/* drop the master reference held by the file priv */
	if (file_priv->master)
		drm_master_put(&file_priv->master);
	file_priv->is_master = 0;
	mutex_unlock(&dev->master_mutex);
	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file_priv->prime);

	WARN_ON(!list_empty(&file_priv->event_list));

	put_pid(file_priv->pid);
	kfree(file_priv);

	/* ========================================================
	 * End inline drm_release
	 */

	if (!--dev->open_count) {
		retcode = drm_lastclose(dev);
		if (drm_device_is_unplugged(dev))
			drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return retcode;
}
EXPORT_SYMBOL(drm_release);
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret = 0;

	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;

	spin_lock_irq(&dev->event_lock);
	for (;;) {
		if (list_empty(&file_priv->event_list)) {
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			spin_unlock_irq(&dev->event_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			spin_lock_irq(&dev->event_lock);
			if (ret < 0)
				break;

			ret = 0;
		} else {
			struct drm_pending_event *e;

			e = list_first_entry(&file_priv->event_list,
					     struct drm_pending_event, link);
			if (e->event->length + ret > count)
				break;

			if (__copy_to_user_inatomic(buffer + ret,
						    e->event, e->event->length)) {
				if (ret == 0)
					ret = -EFAULT;
				break;
			}

			file_priv->event_space += e->event->length;
			ret += e->event->length;
			list_del(&e->link);
			e->destroy(e);
		}
	}
	spin_unlock_irq(&dev->event_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
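/*
 * Illustrative sketch (not part of this file; the buffer size and the
 * handle_vblank() helper are hypothetical): userspace consumes the
 * length-prefixed struct drm_event records that drm_read() copies out by
 * walking the returned buffer, e.g.:
 *
 *	char buf[1024];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	ssize_t off = 0;
 *	while (off < len) {
 *		struct drm_event *ev = (struct drm_event *)&buf[off];
 *		if (ev->type == DRM_EVENT_VBLANK)
 *			handle_vblank((struct drm_event_vblank *)ev);
 *		off += ev->length;
 *	}
 */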
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file_priv->event_wait, wait);

	if (!list_empty(&file_priv->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(drm_poll);
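/*
 * Illustrative sketch (not part of this file; drain_events() stands for the
 * read() loop sketched after drm_read() above): drm_poll() only reports
 * readability, so a typical userspace event loop waits for POLLIN on the DRM
 * fd and then reads the queued events.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drain_events(fd);
 */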