/* -*- c-basic-offset: 8 -*-
 *
 * fw-device-cdev.c - Char device for device raw access
 *
 * Copyright (C) 2005-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/poll.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <asm/uaccess.h>

#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"
#include "fw-device-cdev.h"
/*
 * TODO:
 * - bus resets send a new packet with new generation and node id
 */
/* dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in the struct. */

struct event {
        struct { void *data; size_t size; } v[2];
        struct list_head link;
};
struct response {
        struct event event;
        struct fw_transaction transaction;
        struct client *client;
        struct fw_cdev_event_response response;
};

struct iso_interrupt {
        struct event event;
        struct fw_cdev_event_iso_interrupt interrupt;
};
struct client {
        struct fw_device *device;
        spinlock_t lock;
        struct list_head handler_list;
        struct list_head request_list;
        u32 request_serial;
        struct list_head event_list;
        struct semaphore event_list_sem;
        wait_queue_head_t wait;

        struct fw_iso_context *iso_context;
        struct fw_iso_buffer buffer;
        unsigned long vm_start;
};
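
/*
 * Pointers in the cdev ABI are carried as __u64 so the same structure
 * layout works for 32-bit and 64-bit user space; these helpers convert
 * between the ABI representation and kernel user-space pointers.
 */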
static inline void __user *
u64_to_uptr(__u64 value)
{
        return (void __user *)(unsigned long)value;
}

static inline __u64
uptr_to_u64(void __user *ptr)
{
        return (__u64)(unsigned long)ptr;
}
static int fw_device_op_open(struct inode *inode, struct file *file)
{
        struct fw_device *device;
        struct client *client;

        device = container_of(inode->i_cdev, struct fw_device, cdev);

        client = kzalloc(sizeof *client, GFP_KERNEL);
        if (client == NULL)
                return -ENOMEM;

        client->device = fw_device_get(device);
        INIT_LIST_HEAD(&client->event_list);
        sema_init(&client->event_list_sem, 0);
        INIT_LIST_HEAD(&client->handler_list);
        INIT_LIST_HEAD(&client->request_list);
        spin_lock_init(&client->lock);
        init_waitqueue_head(&client->wait);

        file->private_data = client;

        return 0;
}
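
/*
 * queue_event() appends an event to the client's event list and wakes up
 * any reader sleeping in read() or poll().  The event body is described
 * by up to two (data, size) pairs, so a fixed-size header and a
 * variable-length payload can be delivered without an extra copy.
 */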
static void queue_event(struct client *client, struct event *event,
                        void *data0, size_t size0, void *data1, size_t size1)
{
        unsigned long flags;

        event->v[0].data = data0;
        event->v[0].size = size0;
        event->v[1].data = data1;
        event->v[1].size = size1;

        spin_lock_irqsave(&client->lock, flags);

        list_add_tail(&event->link, &client->event_list);

        up(&client->event_list_sem);
        wake_up_interruptible(&client->wait);

        spin_unlock_irqrestore(&client->lock, flags);
}
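
/*
 * dequeue_event() blocks until an event is available (the semaphore
 * counts queued events), removes the oldest one, copies as much of it as
 * fits into the user buffer and then frees it.  release() calls it with
 * a NULL buffer and count == 0 just to drain and free pending events.
 */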
static int dequeue_event(struct client *client, char __user *buffer, size_t count)
{
        unsigned long flags;
        struct event *event;
        size_t size, total;
        int i, retval = -EFAULT;

        if (down_interruptible(&client->event_list_sem) < 0)
                return -EINTR;

        spin_lock_irqsave(&client->lock, flags);

        event = container_of(client->event_list.next, struct event, link);
        list_del(&event->link);

        spin_unlock_irqrestore(&client->lock, flags);

        total = 0;
        for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
                size = min(event->v[i].size, count - total);
                if (copy_to_user(buffer + total, event->v[i].data, size))
                        goto out;
                total += size;
        }
        retval = total;

 out:
        kfree(event);

        return retval;
}

static ssize_t
fw_device_op_read(struct file *file,
                  char __user *buffer, size_t count, loff_t *offset)
{
        struct client *client = file->private_data;

        return dequeue_event(client, buffer, count);
}
static int ioctl_config_rom(struct client *client, void __user *arg)
{
        struct fw_cdev_get_config_rom rom;

        rom.length = client->device->config_rom_length;
        memcpy(rom.data, client->device->config_rom, rom.length * 4);
        if (copy_to_user(arg, &rom,
                         (char *)&rom.data[rom.length] - (char *)&rom))
                return -EFAULT;

        return 0;
}
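
/*
 * complete_transaction() is the fw_send_request() completion callback.
 * It runs when the split transaction finishes (or fails), fills in the
 * response event with the rcode and any returned payload, and queues it
 * so the client can pick it up via read().
 */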
static void
complete_transaction(struct fw_card *card, int rcode,
                     void *payload, size_t length, void *data)
{
        struct response *response = data;
        struct client *client = response->client;

        if (length < response->response.length)
                response->response.length = length;
        if (rcode == RCODE_COMPLETE)
                memcpy(response->response.data, payload,
                       response->response.length);

        response->response.type = FW_CDEV_EVENT_RESPONSE;
        response->response.rcode = rcode;
        queue_event(client, &response->event,
                    &response->response, sizeof response->response,
                    response->response.data, response->response.length);
}
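
/*
 * Start an asynchronous request on behalf of the client.  The response
 * buffer is allocated together with the tracking structure; the result
 * is delivered later as an FW_CDEV_EVENT_RESPONSE event from
 * complete_transaction() above.
 */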
static ssize_t ioctl_send_request(struct client *client, void __user *arg)
{
        struct fw_device *device = client->device;
        struct fw_cdev_send_request request;
        struct response *response;

        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        /* What is the biggest size we'll accept, really? */
        if (request.length > 4096)
                return -EINVAL;

        response = kmalloc(sizeof *response + request.length, GFP_KERNEL);
        if (response == NULL)
                return -ENOMEM;

        response->client = client;
        response->response.length = request.length;
        response->response.closure = request.closure;

        if (request.data != 0 &&
            copy_from_user(response->response.data,
                           u64_to_uptr(request.data), request.length)) {
                kfree(response);
                return -EFAULT;
        }

        fw_send_request(device->card, &response->transaction,
                        request.tcode,
                        device->node->node_id,
                        device->card->generation,
                        device->node->max_speed,
                        request.offset,
                        response->response.data, request.length,
                        complete_transaction, response);

        if (request.data != 0)
                return sizeof request + request.length;
        else
                return sizeof request;
}
struct address_handler {
        struct fw_address_handler handler;
        __u64 closure;
        struct client *client;
        struct list_head link;
};

struct request {
        struct fw_request *request;
        void *data;
        size_t length;
        u32 serial;
        struct list_head link;
};

struct request_event {
        struct event event;
        struct fw_cdev_event_request request;
};
static void
handle_request(struct fw_card *card, struct fw_request *r,
               int tcode, int destination, int source,
               int generation, int speed,
               unsigned long long offset,
               void *payload, size_t length, void *callback_data)
{
        struct address_handler *handler = callback_data;
        struct request *request;
        struct request_event *e;
        unsigned long flags;
        struct client *client = handler->client;

        request = kmalloc(sizeof *request, GFP_ATOMIC);
        e = kmalloc(sizeof *e, GFP_ATOMIC);
        if (request == NULL || e == NULL) {
                kfree(request);
                kfree(e);
                fw_send_response(card, r, RCODE_CONFLICT_ERROR);
                return;
        }

        request->request = r;
        request->data = payload;
        request->length = length;

        spin_lock_irqsave(&client->lock, flags);
        request->serial = client->request_serial++;
        list_add_tail(&request->link, &client->request_list);
        spin_unlock_irqrestore(&client->lock, flags);

        e->request.type = FW_CDEV_EVENT_REQUEST;
        e->request.tcode = tcode;
        e->request.offset = offset;
        e->request.length = length;
        e->request.serial = request->serial;
        e->request.closure = handler->closure;

        queue_event(client, &e->event,
                    &e->request, sizeof e->request, payload, length);
}
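
/*
 * Allocate an address range on the local node for this client.  Incoming
 * requests that hit the range are passed to handle_request(), which
 * queues an FW_CDEV_EVENT_REQUEST event; the client answers it later
 * with the FW_CDEV_IOC_SEND_RESPONSE ioctl.
 */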
static int ioctl_allocate(struct client *client, void __user *arg)
{
        struct fw_cdev_allocate request;
        struct address_handler *handler;
        unsigned long flags;
        struct fw_address_region region;

        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        handler = kmalloc(sizeof *handler, GFP_KERNEL);
        if (handler == NULL)
                return -ENOMEM;

        region.start = request.offset;
        region.end = request.offset + request.length;
        handler->handler.length = request.length;
        handler->handler.address_callback = handle_request;
        handler->handler.callback_data = handler;
        handler->closure = request.closure;
        handler->client = client;

        if (fw_core_add_address_handler(&handler->handler, &region) < 0) {
                kfree(handler);
                return -EBUSY;
        }

        spin_lock_irqsave(&client->lock, flags);
        list_add_tail(&handler->link, &client->handler_list);
        spin_unlock_irqrestore(&client->lock, flags);

        return 0;
}
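
/*
 * Complete an inbound request previously reported to the client: look it
 * up by serial number, copy in the response payload (if any) and send
 * the response with the rcode chosen by the client.
 */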
static int ioctl_send_response(struct client *client, void __user *arg)
{
        struct fw_cdev_send_response request;
        struct request *r;
        unsigned long flags;

        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        spin_lock_irqsave(&client->lock, flags);
        list_for_each_entry(r, &client->request_list, link) {
                if (r->serial == request.serial) {
                        list_del(&r->link);
                        break;
                }
        }
        spin_unlock_irqrestore(&client->lock, flags);

        if (&r->link == &client->request_list)
                return -EINVAL;

        if (request.length < r->length)
                r->length = request.length;
        if (copy_from_user(r->data, u64_to_uptr(request.data), r->length))
                return -EFAULT;

        fw_send_response(client->device->card, r->request, request.rcode);
        kfree(r);

        return 0;
}
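
/*
 * iso_callback() runs in atomic context when the isochronous context
 * signals an interrupt packet, hence the GFP_ATOMIC allocation.  Each
 * completion is turned into an FW_CDEV_EVENT_ISO_INTERRUPT event.
 */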
static void
iso_callback(struct fw_iso_context *context, int status, u32 cycle, void *data)
{
        struct client *client = data;
        struct iso_interrupt *interrupt;

        interrupt = kzalloc(sizeof *interrupt, GFP_ATOMIC);
        if (interrupt == NULL)
                return;

        interrupt->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT;
        interrupt->interrupt.closure = 0;
        interrupt->interrupt.cycle = cycle;
        queue_event(client, &interrupt->event,
                    &interrupt->interrupt, sizeof interrupt->interrupt, NULL, 0);
}
static int ioctl_create_iso_context(struct client *client, void __user *arg)
{
        struct fw_cdev_create_iso_context request;

        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        client->iso_context = fw_iso_context_create(client->device->card,
                                                    FW_ISO_CONTEXT_TRANSMIT,
                                                    iso_callback, client);
        if (IS_ERR(client->iso_context))
                return PTR_ERR(client->iso_context);

        return 0;
}
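
/*
 * Queue a batch of packets on the client's isochronous context.  The
 * packet descriptors are read from a user-space array; payload offsets
 * are interpreted relative to the mmap()'ed DMA buffer as described in
 * the comment below.  On return, the request structure is copied back
 * to tell the client how far the kernel got.
 */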
static int ioctl_queue_iso(struct client *client, void __user *arg)
{
        struct fw_cdev_queue_iso request;
        struct fw_cdev_iso_packet __user *p, *end, *next;
        unsigned long payload, payload_end;
        int count;
        struct {
                struct fw_iso_packet packet;
                u8 header[256];
        } u;

        if (client->iso_context == NULL)
                return -EINVAL;
        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        /* If the user passes a non-NULL data pointer, has mmap()'ed
         * the iso buffer, and the pointer points inside the buffer,
         * we set up the payload pointers accordingly.  Otherwise we
         * set them both to 0, which will still let packets with
         * payload_length == 0 through.  In other words, if no packets
         * use the indirect payload, the iso buffer need not be mapped
         * and the request.data pointer is ignored. */

        payload = (unsigned long)request.data - client->vm_start;
        payload_end = payload + (client->buffer.page_count << PAGE_SHIFT);
        if (request.data == 0 || client->buffer.pages == NULL ||
            payload >= payload_end) {
                payload = 0;
                payload_end = 0;
        }

        if (!access_ok(VERIFY_READ, request.packets, request.size))
                return -EFAULT;

        p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request.packets);
        end = (void __user *)p + request.size;
        count = 0;
        while (p < end) {
                if (__copy_from_user(&u.packet, p, sizeof *p))
                        return -EFAULT;
                next = (struct fw_cdev_iso_packet __user *)
                        &p->header[u.packet.header_length / 4];
                if (next > end)
                        return -EINVAL;
                if (__copy_from_user
                    (u.packet.header, p->header, u.packet.header_length))
                        return -EFAULT;
                if (u.packet.skip &&
                    u.packet.header_length + u.packet.payload_length > 0)
                        return -EINVAL;
                if (payload + u.packet.payload_length > payload_end)
                        return -EINVAL;

                if (fw_iso_context_queue(client->iso_context,
                                         &u.packet, &client->buffer, payload))
                        break;

                p = next;
                payload += u.packet.payload_length;
                count++;
        }

        request.size -= uptr_to_u64(p) - request.packets;
        request.packets = uptr_to_u64(p);
        request.data = client->vm_start + payload;

        if (copy_to_user(arg, &request, sizeof request))
                return -EFAULT;

        return count;
}
static int ioctl_send_iso(struct client *client, void __user *arg)
{
        struct fw_cdev_send_iso request;

        if (copy_from_user(&request, arg, sizeof request))
                return -EFAULT;

        return fw_iso_context_send(client->iso_context, request.channel,
                                   request.speed, request.cycle);
}
static long
dispatch_ioctl(struct client *client, unsigned int cmd, void __user *arg)
{
        switch (cmd) {
        case FW_CDEV_IOC_GET_CONFIG_ROM:
                return ioctl_config_rom(client, arg);
        case FW_CDEV_IOC_SEND_REQUEST:
                return ioctl_send_request(client, arg);
        case FW_CDEV_IOC_ALLOCATE:
                return ioctl_allocate(client, arg);
        case FW_CDEV_IOC_SEND_RESPONSE:
                return ioctl_send_response(client, arg);
        case FW_CDEV_IOC_CREATE_ISO_CONTEXT:
                return ioctl_create_iso_context(client, arg);
        case FW_CDEV_IOC_QUEUE_ISO:
                return ioctl_queue_iso(client, arg);
        case FW_CDEV_IOC_SEND_ISO:
                return ioctl_send_iso(client, arg);
        default:
                return -EINVAL;
        }
}
static long
fw_device_op_ioctl(struct file *file,
                   unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        return dispatch_ioctl(client, cmd, (void __user *) arg);
}

#ifdef CONFIG_COMPAT
static long
fw_device_op_compat_ioctl(struct file *file,
                          unsigned int cmd, unsigned long arg)
{
        struct client *client = file->private_data;

        return dispatch_ioctl(client, cmd, compat_ptr(arg));
}
#endif
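
/*
 * mmap() hands the client a DMA-able buffer for isochronous payloads.
 * The mapping direction follows the VM_WRITE flag: a writable mapping
 * is treated as a transmit buffer (DMA_TO_DEVICE), a read-only one as a
 * receive buffer (DMA_FROM_DEVICE).  Only one buffer per client is
 * supported.
 */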
static int fw_device_op_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct client *client = file->private_data;
        enum dma_data_direction direction;
        unsigned long size;
        int page_count, retval;

        /* FIXME: We could support multiple buffers, but we don't. */
        if (client->buffer.pages != NULL)
                return -EBUSY;

        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;

        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;

        client->vm_start = vma->vm_start;
        size = vma->vm_end - vma->vm_start;
        page_count = size >> PAGE_SHIFT;
        if (size & ~PAGE_MASK)
                return -EINVAL;

        if (vma->vm_flags & VM_WRITE)
                direction = DMA_TO_DEVICE;
        else
                direction = DMA_FROM_DEVICE;

        retval = fw_iso_buffer_init(&client->buffer, client->device->card,
                                    page_count, direction);
        if (retval < 0)
                return retval;

        retval = fw_iso_buffer_map(&client->buffer, vma);
        if (retval < 0)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        return retval;
}
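
/*
 * release() undoes everything the client set up: the iso buffer and
 * context, any address handlers, and pending inbound requests, which are
 * answered with RCODE_CONFLICT_ERROR so the remote node does not wait
 * forever for a response.  Queued but unread events are simply freed.
 */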
static int fw_device_op_release(struct inode *inode, struct file *file)
{
        struct client *client = file->private_data;
        struct address_handler *h, *next;
        struct request *r, *next_r;

        if (client->buffer.pages)
                fw_iso_buffer_destroy(&client->buffer, client->device->card);

        if (client->iso_context)
                fw_iso_context_destroy(client->iso_context);

        list_for_each_entry_safe(h, next, &client->handler_list, link) {
                fw_core_remove_address_handler(&h->handler);
                kfree(h);
        }

        list_for_each_entry_safe(r, next_r, &client->request_list, link) {
                fw_send_response(client->device->card, r->request,
                                 RCODE_CONFLICT_ERROR);
                kfree(r);
        }

        /* TODO: wait for all transactions to finish so
         * complete_transaction doesn't try to queue up responses
         * after we free client. */
        while (!list_empty(&client->event_list))
                dequeue_event(client, NULL, 0);

        fw_device_put(client->device);
        kfree(client);

        return 0;
}
static unsigned int fw_device_op_poll(struct file *file, poll_table * pt)
{
        struct client *client = file->private_data;

        poll_wait(file, &client->wait, pt);

        if (!list_empty(&client->event_list))
                return POLLIN | POLLRDNORM;
        else
                return 0;
}
const struct file_operations fw_device_ops = {
        .owner = THIS_MODULE,
        .open = fw_device_op_open,
        .read = fw_device_op_read,
        .unlocked_ioctl = fw_device_op_ioctl,
        .poll = fw_device_op_poll,
        .release = fw_device_op_release,
        .mmap = fw_device_op_mmap,

#ifdef CONFIG_COMPAT
        .compat_ioctl = fw_device_op_compat_ioctl,
#endif
};