/**
 * \file drm_fops.c
 * File operations for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include <linux/poll.h>

static int drm_open_helper(struct inode *inode, struct file *filp,
                           drm_device_t * dev);

static int drm_setup(drm_device_t * dev)
{
        int i;
        int ret;

        if (dev->driver->presetup) {
                ret = dev->driver->presetup(dev);
                if (ret != 0)
                        return ret;
        }

        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);
        dev->buf_use = 0;
        atomic_set(&dev->buf_alloc, 0);

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
                i = drm_dma_setup(dev);
                if (i < 0)
                        return i;
        }

        for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
                atomic_set(&dev->counts[i], 0);

        for (i = 0; i < DRM_HASH_SIZE; i++) {
                dev->magiclist[i].head = NULL;
                dev->magiclist[i].tail = NULL;
        }

        dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
        if (dev->ctxlist == NULL)
                return -ENOMEM;
        memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
        INIT_LIST_HEAD(&dev->ctxlist->head);

        dev->vmalist = NULL;
        dev->sigdata.lock = dev->lock.hw_lock = NULL;
        init_waitqueue_head(&dev->lock.lock_queue);
        dev->queue_count = 0;
        dev->queue_reserved = 0;
        dev->queue_slots = 0;
        dev->queuelist = NULL;
        dev->irq_enabled = 0;
        dev->context_flag = 0;
        dev->interrupt_flag = 0;
        dev->dma_flag = 0;
        dev->last_context = 0;
        dev->last_switch = 0;
        dev->last_checked = 0;
        init_waitqueue_head(&dev->context_wait);
        dev->if_version = 0;

        dev->ctx_start = 0;
        dev->lck_start = 0;

        dev->buf_rp = dev->buf;
        dev->buf_wp = dev->buf;
        dev->buf_end = dev->buf + DRM_BSZ;
        dev->buf_async = NULL;
        init_waitqueue_head(&dev->buf_readers);
        init_waitqueue_head(&dev->buf_writers);

        DRM_DEBUG("\n");

        /*
         * The kernel's context could be created here, but is now created
         * in drm_dma_enqueue.  This is more resource-efficient for
         * hardware that does not do DMA, but may mean that
         * drm_select_queue fails between the time the interrupt is
         * initialized and the time the queues are initialized.
         */
        if (dev->driver->postsetup)
                dev->driver->postsetup(dev);

        return 0;
}
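
/*
 * Illustrative sketch, not part of the original file: hypothetical
 * presetup()/postsetup() driver hooks of the kind drm_setup() invokes
 * above.  The names and bodies are assumptions; the signatures are
 * inferred from the call sites (a non-zero presetup() return aborts the
 * setup, while postsetup()'s return value is ignored by drm_setup()).
 */
static int example_presetup(drm_device_t * dev)
{
        /* work that must precede the core per-open initialization,
         * e.g. quiescing the hardware; non-zero fails the first open */
        return 0;
}

static int example_postsetup(drm_device_t * dev)
{
        /* work that depends on the core per-open initialization */
        return 0;
}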

/**
 * Open file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * Searches for the DRM device with the same minor number, calls
 * drm_open_helper(), and increments the device open count. If the open
 * count was previously zero, i.e., this is the first time the device is
 * opened, calls drm_setup().
 */
int drm_open(struct inode *inode, struct file *filp)
{
        drm_device_t *dev = NULL;
        int minor = iminor(inode);
        int retcode = 0;

        if (!((minor >= 0) && (minor < drm_cards_limit)))
                return -ENODEV;

        if (!drm_heads[minor])
                return -ENODEV;

        if (!(dev = drm_heads[minor]->dev))
                return -ENODEV;

        retcode = drm_open_helper(inode, filp, dev);
        if (!retcode) {
                atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
                spin_lock(&dev->count_lock);
                if (!dev->open_count++) {
                        spin_unlock(&dev->count_lock);
                        return drm_setup(dev);
                }
                spin_unlock(&dev->count_lock);
        }

        return retcode;
}

EXPORT_SYMBOL(drm_open);

/**
 * Release file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * If the hardware lock is held, frees it and then takes it again for the
 * kernel context, since it is needed to reclaim buffers. Unlinks the file
 * private data from its list and frees it. Decreases the open count and,
 * if it reaches zero, calls drm_takedown().
 */
int drm_release(struct inode *inode, struct file *filp)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev;
        int retcode = 0;

        lock_kernel();
        dev = priv->head->dev;

        DRM_DEBUG("open_count = %d\n", dev->open_count);

        if (dev->driver->prerelease)
                dev->driver->prerelease(dev, filp);

        /* ========================================================
         * Begin inline drm_release
         */

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
                  current->pid, (long)old_encode_dev(priv->head->device),
                  dev->open_count);

        if (priv->lock_count && dev->lock.hw_lock &&
            _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
            dev->lock.filp == filp) {
                DRM_DEBUG("File %p released, freeing lock for context %d\n",
                          filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                if (dev->driver->release)
                        dev->driver->release(dev, filp);

                drm_lock_free(dev, &dev->lock.hw_lock->lock,
                              _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

                /* FIXME: may require heavy-handed reset of
                   hardware at this point, possibly
                   processed via a callback to the X
                   server. */
        } else if (dev->driver->release && priv->lock_count
                   && dev->lock.hw_lock) {
                /* The lock is required to reclaim buffers */
                DECLARE_WAITQUEUE(entry, current);

                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        __set_current_state(TASK_INTERRUPTIBLE);
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                retcode = -EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          DRM_KERNEL_CONTEXT)) {
                                dev->lock.filp = filp;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
                                break;  /* Got lock */
                        }
                        /* Contention */
                        schedule();
                        if (signal_pending(current)) {
                                retcode = -ERESTARTSYS;
                                break;
                        }
                }
                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&dev->lock.lock_queue, &entry);
                if (!retcode) {
                        if (dev->driver->release)
                                dev->driver->release(dev, filp);
                        drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                      DRM_KERNEL_CONTEXT);
                }
        }

        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)
            && !dev->driver->release) {
                dev->driver->reclaim_buffers(dev, filp);
        }

        drm_fasync(-1, filp, 0);

        down(&dev->ctxlist_sem);
        if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
                drm_ctx_list_t *pos, *n;

                list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
                        if (pos->tag == priv &&
                            pos->handle != DRM_KERNEL_CONTEXT) {
                                if (dev->driver->context_dtor)
                                        dev->driver->context_dtor(dev,
                                                                  pos->handle);

                                drm_ctxbitmap_free(dev, pos->handle);

                                list_del(&pos->head);
                                drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
                                --dev->ctx_count;
                        }
                }
        }
        up(&dev->ctxlist_sem);

        down(&dev->struct_sem);
        if (priv->remove_auth_on_close == 1) {
                drm_file_t *temp = dev->file_first;
                while (temp) {
                        temp->authenticated = 0;
                        temp = temp->next;
                }
        }
        if (priv->prev) {
                priv->prev->next = priv->next;
        } else {
                dev->file_first = priv->next;
        }
        if (priv->next) {
                priv->next->prev = priv->prev;
        } else {
                dev->file_last = priv->prev;
        }
        up(&dev->struct_sem);

        if (dev->driver->free_filp_priv)
                dev->driver->free_filp_priv(dev, priv);

        drm_free(priv, sizeof(*priv), DRM_MEM_FILES);

        /* ========================================================
         * End inline drm_release
         */

        atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
        spin_lock(&dev->count_lock);
        if (!--dev->open_count) {
                if (atomic_read(&dev->ioctl_count) || dev->blocked) {
                        DRM_ERROR("Device busy: %d %d\n",
                                  atomic_read(&dev->ioctl_count), dev->blocked);
                        spin_unlock(&dev->count_lock);
                        unlock_kernel();
                        return -EBUSY;
                }
                spin_unlock(&dev->count_lock);
                unlock_kernel();
                return drm_takedown(dev);
        }
        spin_unlock(&dev->count_lock);

        unlock_kernel();

        return retcode;
}

EXPORT_SYMBOL(drm_release);

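/*
 * Illustrative sketch, not part of the original file: the reclaim_buffers
 * hook called from drm_release() above is driver-supplied.  A driver with
 * no special teardown needs would typically just forward to the generic
 * helper; the example assumes drm_core_reclaim_buffers() from drm_bufs.c
 * is available in this tree, and the function name here is made up.
 */
static void example_reclaim_buffers(drm_device_t * dev, struct file *filp)
{
        /* hand back any DMA buffers still owned by this file */
        drm_core_reclaim_buffers(dev, filp);
}
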
/**
 * Called whenever a process opens /dev/drm.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param dev device.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in
 * \p filp and adds it to the doubly linked list in \p dev.
 */
static int drm_open_helper(struct inode *inode, struct file *filp,
                           drm_device_t * dev)
{
        int minor = iminor(inode);
        drm_file_t *priv;
        int ret;

        if (filp->f_flags & O_EXCL)
                return -EBUSY;  /* No exclusive opens */
        if (!drm_cpu_valid())
                return -EINVAL;

        DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);

        priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
        if (!priv)
                return -ENOMEM;

        memset(priv, 0, sizeof(*priv));
        filp->private_data = priv;
        priv->uid = current->euid;
        priv->pid = current->pid;
        priv->minor = minor;
        priv->head = drm_heads[minor];
        priv->ioctl_count = 0;
        priv->authenticated = capable(CAP_SYS_ADMIN);
        priv->lock_count = 0;

        if (dev->driver->open_helper) {
                ret = dev->driver->open_helper(dev, priv);
                if (ret < 0)
                        goto out_free;
        }

        down(&dev->struct_sem);
        if (!dev->file_last) {
                priv->next = NULL;
                priv->prev = NULL;
                dev->file_first = priv;
                dev->file_last = priv;
        } else {
                priv->next = NULL;
                priv->prev = dev->file_last;
                dev->file_last->next = priv;
                dev->file_last = priv;
        }
        up(&dev->struct_sem);

#ifdef __alpha__
        /*
         * Default the hose
         */
        if (!dev->hose) {
                struct pci_dev *pci_dev;
                pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
                if (pci_dev) {
                        dev->hose = pci_dev->sysdata;
                        pci_dev_put(pci_dev);
                }
                if (!dev->hose) {
                        struct pci_bus *b = pci_bus_b(pci_root_buses.next);
                        if (b)
                                dev->hose = b->sysdata;
                }
        }
#endif

        return 0;
      out_free:
        drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
        filp->private_data = NULL;
        return ret;
}

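/*
 * Illustrative sketch, not part of the original file: a hypothetical driver
 * open_helper() hook of the kind drm_open_helper() calls above.  The
 * signature is inferred from the call site; the name and body are
 * assumptions for illustration only.
 */
static int example_open_helper(drm_device_t * dev, drm_file_t * priv)
{
        /* a negative return makes drm_open_helper() fail the open and
         * free the drm_file it just allocated */
        if (!dev->dev_private)
                return -ENODEV;

        /* per-open driver state would be allocated and attached here */
        return 0;
}
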
/** No-op. */
int drm_flush(struct file *filp)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;

        DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
                  current->pid, (long)old_encode_dev(priv->head->device),
                  dev->open_count);
        return 0;
}

EXPORT_SYMBOL(drm_flush);

/** Set up or tear down SIGIO notification for this file via fasync_helper(). */
int drm_fasync(int fd, struct file *filp, int on)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int retcode;

        DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
                  (long)old_encode_dev(priv->head->device));
        retcode = fasync_helper(fd, filp, on, &dev->buf_async);
        if (retcode < 0)
                return retcode;
        return 0;
}

EXPORT_SYMBOL(drm_fasync);

/** No-op. */
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
        return 0;
}

EXPORT_SYMBOL(drm_poll);
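
/*
 * Illustrative sketch, not part of the original file: how a DRM driver of
 * this era would typically wire the entry points defined here into the
 * file_operations table for its character device.  The variable name is
 * made up, and drm_ioctl()/drm_mmap() live elsewhere in the DRM core;
 * treat this as an assumption about a typical driver, not as code taken
 * from this file.
 */
static struct file_operations example_drm_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .ioctl = drm_ioctl,
        .mmap = drm_mmap,
        .poll = drm_poll,
        .fasync = drm_fasync,
};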