/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <asm/iommu.h>
#include <asm/tce.h>

#define DRIVER_VERSION  "0.1"
#define DRIVER_AUTHOR   "aik@ozlabs.ru"
#define DRIVER_DESC     "VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);

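/*
 * Bump/drop the locked_vm accounting of the calling process. These helpers
 * are used below to charge pinned DMA memory against RLIMIT_MEMLOCK;
 * try_increment_locked_vm() fails with -ENOMEM when the rlimit would be
 * exceeded, unless the task has CAP_IPC_LOCK.
 */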
static long try_increment_locked_vm(long npages)
{
	long ret = 0, locked, lock_limit;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if (!npages)
		return 0;

	down_write(&current->mm->mmap_sem);
	locked = current->mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		current->mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}

static void decrement_locked_vm(long npages)
{
	if (!current || !current->mm || !npages)
		return; /* process exited */

	down_write(&current->mm->mmap_sem);
	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
		npages = current->mm->locked_vm;
	current->mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&current->mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for the SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU.
 */

/*
 * The container descriptor supports only a single group per container.
 * This is required by the API, as the container is not supplied with an
 * IOMMU group at initialization time.
 */
struct tce_container {
	struct mutex lock;
	struct iommu_group *grp;
	bool enabled;
	unsigned long locked_pages;
};

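/*
 * A TCE may only map memory that is physically contiguous over the whole
 * IOMMU page. Illustrative numbers: with a 64K TCE page size
 * (page_shift == 16) on a 4K-PAGE_SIZE kernel, an order-0 page fails the
 * check below (12 + 0 < 16), while a 16MB huge page passes (12 + 12 >= 16).
 */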
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}

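/*
 * Find the IOMMU table whose DMA window covers @ioba. Returns the window
 * number (the table's index within the group) and sets *ptbl on success,
 * or -1 if no window covers the address.
 */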
static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;
	struct iommu_table_group *table_group;

	table_group = iommu_group_get_iommudata(container->grp);
	if (!table_group)
		return -1;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;

	if (!container->grp)
		return -ENXIO;

	if (!current->mm)
		return -ESRCH; /* process exited */

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult, if not
	 * impossible, to handle the accounting in real mode.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately, at the moment this counts whole tables, no matter
	 * how much memory the guest actually has. I.e. for a 4GB guest with
	 * 4 IOMMU groups, each with a 2GB DMA window, 8GB will be counted
	 * here. The reason is that we cannot tell the amount of RAM used by
	 * the guest at this point, as that information is only available
	 * from KVM and VFIO is KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	table_group = iommu_group_get_iommudata(container->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}

static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	if (!current->mm)
		return;

	decrement_locked_vm(container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if (arg != VFIO_SPAPR_TCE_IOMMU) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);

	return container;
}

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;

	WARN_ON(container->grp);

	if (container->grp)
		tce_iommu_detach_group(iommu_data, container->grp);

	tce_iommu_disable(container);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}

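/*
 * Clear @pages TCE entries starting at @entry, dropping the page reference
 * taken at map time for every entry that was actually in use.
 */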
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

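/*
 * Pin the user page backing @tce with get_user_pages_fast() and return its
 * host physical address via @hpa. The reference is dropped later by
 * tce_iommu_unuse_page().
 */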
static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}

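/*
 * Populate @pages TCE entries starting at @entry from the userspace buffer
 * at @tce. On any failure, the entries programmed so far are torn down
 * again via tce_iommu_clear().
 */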
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}

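/*
 * Allocate a new hardware table through the platform ops, charging the
 * table memory itself (not the pages it will map) against the locked_vm
 * limit.
 */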
static long tce_iommu_create_table(struct tce_container *container,
			struct iommu_table_group *table_group,
			int num,
			__u32 page_shift,
			__u64 window_size,
			__u32 levels,
			struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	if (ret)
		decrement_locked_vm(table_size >> PAGE_SHIFT);

	return ret;
}

static void tce_iommu_free_table(struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tbl->it_ops->free(tbl);
	decrement_locked_vm(pages);
}

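/*
 * The ioctl entry point. Supported requests: VFIO_CHECK_EXTENSION,
 * VFIO_IOMMU_SPAPR_TCE_GET_INFO, VFIO_IOMMU_MAP_DMA, VFIO_IOMMU_UNMAP_DMA,
 * VFIO_IOMMU_ENABLE, VFIO_IOMMU_DISABLE and VFIO_EEH_PE_OP.
 */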
static long tce_iommu_ioctl(void *iommu_data,
				 unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;

	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct iommu_table_group *table_group;

		if (WARN_ON(!container->grp))
			return -ENXIO;

		table_group = iommu_group_get_iommudata(container->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		ret = tce_iommu_build(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.vaddr,
				param.size >> tbl->it_page_shift,
				direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flags are supported yet */
		if (param.flags)
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP:
		if (!container->grp)
			return -ENODEV;

		return vfio_spapr_iommu_eeh_ioctl(container->grp,
						  cmd, arg);
	}

	return -ENOTTY;
}

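/*
 * Ownership handover for platforms without dynamic DMA window support:
 * the group's pre-existing kernel-owned tables are cleared and passed
 * between the kernel and VFIO via iommu_take_ownership() and
 * iommu_release_ownership() on each table's it_map bitmap.
 */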
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		if (tbl->it_map)
			iommu_release_ownership(tbl);
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = iommu_take_ownership(tbl);
		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	return 0;
}

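/*
 * Ownership handover for platforms with iommu_table_group_ops (dynamic DMA
 * windows): taking ownership removes the kernel's tables and creates a
 * fresh default 32-bit window; releasing ownership unsets and frees the
 * windows and returns the group to the platform code.
 */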
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		/* Store the table pointer as unset_window resets it */
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl)
			continue;

		table_group->ops->unset_window(table_group, i);
		tce_iommu_clear(container, tbl,
				tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(tbl);
	}

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long ret;
	struct iommu_table *tbl = NULL;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	ret = tce_iommu_create_table(container,
			table_group,
			0, /* window number */
			IOMMU_PAGE_SHIFT_4K,
			table_group->tce32_size,
			1, /* default levels */
			&tbl);
	if (!ret) {
		ret = table_group->ops->set_window(table_group, 0, tbl);
		if (ret)
			tce_iommu_free_table(tbl);
		else
			table_group->tables[0] = tbl;
	}

	if (ret)
		table_group->ops->release_ownership(table_group);

	return ret;
}

static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	if (container->grp) {
		pr_warn("tce_vfio: Only one group per IOMMU container is allowed, existing id=%d, attaching id=%d\n",
				iommu_group_id(container->grp),
				iommu_group_id(iommu_group));
		ret = -EBUSY;
		goto unlock_exit;
	}

	if (container->enabled) {
		pr_err("tce_vfio: attaching group #%u to enabled container\n",
				iommu_group_id(iommu_group));
		ret = -EBUSY;
		goto unlock_exit;
	}

	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENXIO;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)
		ret = tce_iommu_take_ownership(container, table_group);
	else
		ret = tce_iommu_take_ownership_ddw(container, table_group);

	if (!ret)
		container->grp = iommu_group;

unlock_exit:
	mutex_unlock(&container->lock);

	return ret;
}

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;

	mutex_lock(&container->lock);
	if (iommu_group != container->grp) {
		pr_warn("tce_vfio: detaching group #%u, expected group is #%u\n",
				iommu_group_id(iommu_group),
				iommu_group_id(container->grp));
		goto unlock_exit;
	}

	if (container->enabled) {
		pr_warn("tce_vfio: detaching group #%u from enabled container, forcing disable\n",
				iommu_group_id(container->grp));
		tce_iommu_disable(container);
	}

	/* pr_debug("tce_vfio: detaching group #%u from iommu %p\n",
	   iommu_group_id(iommu_group), iommu_group); */
	container->grp = NULL;

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};
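
/*
 * A minimal sketch of how a userspace client might drive this backend
 * through the generic VFIO uAPI. The group path "/dev/vfio/26", the buffer
 * and the sizes are illustrative assumptions, and error handling is
 * omitted:
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/26", O_RDWR);
 *	void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *	struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map) };
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(container, VFIO_IOMMU_ENABLE);
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *
 *	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
 *	map.vaddr = (__u64)(unsigned long)buf;
 *	map.iova = info.dma32_window_start;
 *	map.size = 4096;
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */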

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);