/*
 * File operations for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 *
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "drm_legacy.h"
#include "drm_internal.h"
#include "drm_crtc_internal.h"

/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);

/**
 * DOC: file operations
 *
 * Drivers must define the file operations structure that forms the DRM
 * userspace API entry point, even though most of those operations are
 * implemented in the DRM core. The mandatory functions are drm_open(),
 * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled
 * (note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n). Drivers which
 * implement private ioctls that require 32/64 bit compatibility support must
 * provide their own .compat_ioctl() handler that processes private ioctls and
 * calls drm_compat_ioctl() for core ioctls, as sketched below.
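 *
 * For example (a sketch only: the example_* names are hypothetical, while
 * DRM_IOCTL_NR(), DRM_COMMAND_BASE and drm_compat_ioctl() are real DRM
 * interfaces)::
 *
 *     static long example_compat_ioctl(struct file *filp, unsigned int cmd,
 *                                      unsigned long arg)
 *     {
 *             // Driver-private ioctls are numbered from DRM_COMMAND_BASE up.
 *             if (DRM_IOCTL_NR(cmd) >= DRM_COMMAND_BASE)
 *                     return example_compat_private_ioctl(filp, cmd, arg);
 *
 *             // Everything else is a core ioctl, handled by the DRM core.
 *             return drm_compat_ioctl(filp, cmd, arg);
 *     }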
 *
 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 * events are a generic and extensible means to send asynchronous events to
 * userspace through the file descriptor. They are used to send vblank and
 * page flip completion events by the KMS API. But drivers can also use them
 * for their own needs, e.g. to signal completion of rendering.
 *
 * The memory mapping implementation will vary depending on how the driver
 * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
 * function; modern drivers should use one of the provided memory-manager
 * specific implementations. For GEM-based drivers this is drm_gem_mmap().
 *
 * No other file operations are supported by the DRM userspace API. Overall the
 * following is an example #file_operations structure::
 *
 *     static const struct file_operations example_drm_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
 *             .poll = drm_poll,
 *             .read = drm_read,
 *             .llseek = no_llseek,
 *             .mmap = drm_gem_mmap,
 *     };
 */

static int drm_open_helper(struct file *filp, struct drm_minor *minor);

static int drm_setup(struct drm_device *dev)
{
	int ret;

	if (dev->driver->firstopen &&
	    drm_core_check_feature(dev, DRIVER_LEGACY)) {
		ret = dev->driver->firstopen(dev);
		if (ret != 0)
			return ret;
	}

	ret = drm_legacy_dma_setup(dev);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their .open() #file_operations
 * method. It looks up the correct DRM device and instantiates all the per-file
 * resources for it.
 *
 * RETURNS:
 *
 * 0 on success or negative errno value on failure.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (!dev->open_count++)
		need_setup = 1;

	/* share address_space across all char-devs of a single device */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_setup(dev);
		if (retcode)
			goto err_undo;
	}
	return 0;

err_undo:
	dev->open_count--;
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);

/*
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;	/* No cmpxchg before v9 sparc. */
#endif
	return 1;
}

/*
 * Called whenever a process opens /dev/drm.
 *
 * \param filp file pointer.
 * \param minor acquired minor-object.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and adds it into the doubly linked list in \p dev.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	filp->private_data = priv;
	priv->filp = filp;
	priv->pid = get_pid(task_pid(current));
	priv->minor = minor;

	/* for compatibility root is always authenticated */
	priv->authenticated = capable(CAP_SYS_ADMIN);
	priv->lock_count = 0;

	INIT_LIST_HEAD(&priv->lhead);
	INIT_LIST_HEAD(&priv->fbs);
	mutex_init(&priv->fbs_lock);
	INIT_LIST_HEAD(&priv->blobs);
	INIT_LIST_HEAD(&priv->pending_event_list);
	INIT_LIST_HEAD(&priv->event_list);
	init_waitqueue_head(&priv->event_wait);
	priv->event_space = 4096; /* set aside 4k for event buffer */

	mutex_init(&priv->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_init_file_private(&priv->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, priv);
		if (ret < 0)
			goto out_prime_destroy;
	}

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret)
			goto out_close;
	}

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/* Default the hose if the device does not have one yet. */
	if (!dev->hose) {
		struct pci_dev *pci_dev;

		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
						       struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;

out_close:
	if (dev->driver->postclose)
		dev->driver->postclose(dev, priv);
out_prime_destroy:
	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&priv->prime);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, priv);
	put_pid(priv->pid);
	kfree(priv);
	filp->private_data = NULL;

	return ret;
}

static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/*
 * drm_legacy_dev_reinit
 *
 * Reinitializes a legacy/ums drm device in its lastclose function.
 */
static void drm_legacy_dev_reinit(struct drm_device *dev)
{
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);

	drm_legacy_agp_clear(dev);

	drm_legacy_sg_cleanup(dev);
	drm_legacy_vma_flush(dev);
	drm_legacy_dma_takedown(dev);

	mutex_unlock(&dev->struct_mutex);

	dev->sigdata.lock = NULL;

	dev->context_flag = 0;
	dev->last_context = 0;

	DRM_DEBUG("lastclose completed\n");
}

/*
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 */
void drm_lastclose(struct drm_device *dev)
{
	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);
}

/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their .release() #file_operations
 * method. It frees any resources associated with the open file, and if this is
 * the last open file for the DRM device also proceeds to call drm_lastclose().
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	if (dev->driver->preclose)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  task_pid_nr(current),
		  (long)old_encode_dev(file_priv->minor->kdev->devt),
		  dev->open_count);

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file_priv);

	drm_events_release(file_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file_priv);
		drm_property_destroy_user_blobs(dev, file_priv);
	}

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file_priv);

	drm_legacy_ctxbitmap_flush(dev, file_priv);

	if (drm_is_primary_client(file_priv))
		drm_master_release(file_priv);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file_priv->prime);

	WARN_ON(!list_empty(&file_priv->event_list));

	put_pid(file_priv->pid);
	kfree(file_priv);

	/* ========================================================
	 * End inline drm_release
	 */

	if (!--dev->open_count) {
		drm_lastclose(dev);
		if (drm_device_is_unplugged(dev))
			drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);

/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read
 *
 * This function must be used by drivers as their .read() #file_operations
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
 * must set the .llseek() #file_operation to no_llseek(). Polling support is
 * provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress. Since
 * the maximum event space is currently 4K it's recommended to just use that for
 * safety.
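 *
 * As a sketch, a userspace consumer could read and decode events like this
 * (drm_fd is assumed to be an open DRM file descriptor; struct drm_event and
 * the DRM_EVENT_* types come from the uapi drm.h header)::
 *
 *     char buf[4096]; // maximum event space, see above
 *     ssize_t len = read(drm_fd, buf, sizeof(buf));
 *     ssize_t off = 0;
 *
 *     while (off + (ssize_t)sizeof(struct drm_event) <= len) {
 *             struct drm_event *ev = (struct drm_event *)&buf[off];
 *
 *             // ev->type identifies the event, e.g. DRM_EVENT_VBLANK or
 *             // DRM_EVENT_FLIP_COMPLETE; ev->length is its total size.
 *             off += ev->length;
 *     }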
 *
 * RETURNS:
 *
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					     struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);

/**
 * drm_poll - poll method for DRM file
 * @filp: file pointer
 * @wait: poll waiter table
 *
 * This function must be used by drivers as their .poll() #file_operations
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * See also drm_read().
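 *
 * A userspace event loop would typically wait for the fd to become readable
 * before calling read(), along these lines (a sketch; drm_fd is assumed to be
 * an open DRM file descriptor)::
 *
 *     struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };
 *
 *     if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *             len = read(drm_fd, buf, sizeof(buf)); // see drm_read()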
 *
 * RETURNS:
 *
 * Mask of POLL flags indicating the current status of the file.
 */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file_priv->event_wait, wait);

	if (!list_empty(&file_priv->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(drm_poll);

/**
 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embed @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * This is the locked version of drm_event_reserve_init() for callers which
 * already hold &drm_device.event_lock.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e)
{
	if (file_priv->event_space < e->length)
		return -ENOMEM;

	file_priv->event_space -= e->length;

	p->event = e;
	list_add(&p->pending_link, &file_priv->pending_event_list);
	p->file_priv = file_priv;

	return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);

/**
 * drm_event_reserve_init - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace, as the sketch below shows.
 *
 * If callers embed @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * Callers which already hold &drm_device.event_lock should use
 * drm_event_reserve_init_locked() instead.
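 *
 * The whole lifecycle, as a hedged sketch (struct example_event and its
 * vblank event payload are hypothetical; the embedded tracking structure must
 * be the first member, as noted above)::
 *
 *     struct example_event {
 *             struct drm_pending_event base; // must be the first member
 *             struct drm_event_vblank event;
 *     };
 *
 *     struct example_event *e = kzalloc(sizeof(*e), GFP_KERNEL);
 *     if (!e)
 *             return -ENOMEM;
 *
 *     e->event.base.type = DRM_EVENT_VBLANK;
 *     e->event.base.length = sizeof(e->event);
 *     ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
 *     if (ret) {
 *             kfree(e);
 *             return ret;
 *     }
 *
 *     // Later, when the asynchronous operation completes:
 *     drm_send_event(dev, &e->base);
 *     // Or, if the ioctl fails before the event is armed:
 *     drm_event_cancel_free(dev, &e->base);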
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init(struct drm_device *dev,
			   struct drm_file *file_priv,
			   struct drm_pending_event *p,
			   struct drm_event *e)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, flags);
	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return ret;
}
EXPORT_SYMBOL(drm_event_reserve_init);

/**
 * drm_event_cancel_free - free a DRM event and release its space
 * @dev: DRM device
 * @p: tracking structure for the pending event
 *
 * This function frees the event @p initialized with drm_event_reserve_init()
 * and releases any allocated space.
 */
void drm_event_cancel_free(struct drm_device *dev,
			   struct drm_pending_event *p)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (p->file_priv) {
		p->file_priv->event_space += p->event->length;
		list_del(&p->pending_link);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (p->fence)
		dma_fence_put(p->fence);

	kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);

/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock, see drm_send_event() for the unlocked version.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	if (!e->file_priv) {
		kfree(e);
		return;
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible(&e->file_priv->event_wait);
}
EXPORT_SYMBOL(drm_send_event_locked);

/**
 * drm_send_event - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. This function acquires
 * &drm_device.event_lock, see drm_send_event_locked() for callers which already
 * hold this lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	drm_send_event_locked(dev, e);
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);